commit 3b550dd8e5
parent daa0b5a0ba

    finish handleDelayedTask
@@ -30,6 +30,8 @@ type etcdBroker struct {
 	ctx    context.Context
 	client *clientv3.Client
 	wg     sync.WaitGroup
+	keyMap map[string]struct{}
+	mtx    sync.Mutex
 }

 // New ..
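The new keyMap field is a set of pending-task etcd keys, kept current by the listWatchTasks goroutine added below; the accompanying mtx suggests the map is meant to be guarded, though none of the hunks in this commit take the lock. For reference, a minimal sketch of the map[string]struct{} set idiom, using a hypothetical key:

```go
package main

import "fmt"

func main() {
	// map[string]struct{} is the usual Go set idiom: the empty struct
	// occupies zero bytes, so only the keys themselves are stored.
	keyMap := map[string]struct{}{}

	k := "/machinery/v2/broker/pending_tasks/default/some-task-uuid" // hypothetical key
	keyMap[k] = struct{}{} // add
	_, ok := keyMap[k]     // membership test
	fmt.Println(ok)        // true
	delete(keyMap, k)      // remove
}
```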
@@ -50,6 +52,7 @@ func New(ctx context.Context, conf *config.Config) (iface.Broker, error) {
 		Broker: common.NewBroker(conf),
 		ctx:    ctx,
 		client: client,
+		keyMap: map[string]struct{}{},
 	}

 	return &broker, nil
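Initializing keyMap in the constructor is required, not cosmetic: listWatchTasks below writes into the map, and while reading a nil map is safe in Go, writing to one panics. A two-line reminder (this snippet intentionally panics when run):

```go
package main

func main() {
	var m map[string]struct{} // nil map
	_, ok := m["k"]           // reads are safe: ok == false
	_ = ok
	m["k"] = struct{}{} // panic: assignment to entry in nil map
}
```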
@@ -68,12 +71,36 @@ func (b *etcdBroker) StartConsuming(consumerTag string, concurrency int, taskPro
 	// Channel to which we will push tasks ready for processing by worker
 	deliveries := make(chan Delivery)

+	ctx, cancel := context.WithCancel(b.ctx)
+	defer cancel()
+
+	// list watch task
+	b.wg.Add(1)
+	go func() {
+		defer b.wg.Done()
+
+		for {
+			select {
+			// A way to stop this goroutine from b.StopConsuming
+			case <-b.GetStopChan():
+				return
+			default:
+				err := b.listWatchTasks(ctx, getQueue(b.GetConfig(), taskProcessor))
+				if err != nil {
+					log.ERROR.Printf("handle list watch task err: %s", err)
+				}
+			}
+		}
+	}()
+
 	// A receiving goroutine keeps popping messages from the queue by BLPOP
 	// If the message is valid and can be unmarshaled into a proper structure
 	// we send it to the deliveries channel
 	b.wg.Add(1)
 	go func() {
 		defer b.wg.Done()
+		defer cancel()

 		for {
 			select {
@@ -107,6 +134,8 @@ func (b *etcdBroker) StartConsuming(consumerTag string, concurrency int, taskPro
 	b.wg.Add(1)
 	go func() {
 		defer b.wg.Done()
+		defer cancel()
+
 		ticker := time.NewTicker(time.Second)
 		defer ticker.Stop()

@@ -117,7 +146,7 @@ func (b *etcdBroker) StartConsuming(consumerTag string, concurrency int, taskPro
 				return

 			case <-ticker.C:
-				err := b.handleDelayedTask()
+				err := b.handleDelayedTask(ctx)
 				if err != nil {
 					log.ERROR.Printf("handleDelayedTask err: %s", err)
 				}
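All three goroutines in StartConsuming now share one context derived from b.ctx, and the two worker goroutines defer cancel(), so whichever exits first also stops the list/watch loop. A self-contained sketch of that shutdown pattern, with illustrative names not taken from the commit:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Each worker cancels the shared context on exit, so one
			// worker stopping (or failing) unblocks all the others.
			defer cancel()
			select {
			case <-ctx.Done():
				fmt.Printf("worker %d: stopped by a peer\n", id)
			case <-time.After(time.Duration(id+1) * 100 * time.Millisecond):
				fmt.Printf("worker %d: finished first\n", id)
			}
		}(i)
	}
	wg.Wait()
}
```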
@@ -208,7 +237,8 @@ func (b *etcdBroker) Publish(ctx context.Context, signature *tasks.Signature) er
 	// Check the ETA signature field, if it is set and it is in the future,
 	// delay the task
 	if signature.ETA != nil && signature.ETA.After(now) {
-		key = fmt.Sprintf("/machinery/v2/broker/delayed_tasks/eta-%d/%s/%s", signature.ETA.UnixMilli(), signature.RoutingKey, signature.UUID)
+		key = fmt.Sprintf("/machinery/v2/broker/delayed_tasks/eta-%d/%s/%s",
+			signature.ETA.UnixMilli(), signature.RoutingKey, signature.UUID)
 		_, err = b.client.Put(ctx, key, string(msg))
 		return err
 	}
@@ -245,7 +275,7 @@ func (b *etcdBroker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
 	}

 	key := fmt.Sprintf("/machinery/v2/broker/pending_tasks/%s", queue)
-	items, err := b.getTasks(context.Background(), key)
+	items, err := b.getTasks(b.ctx, key)
 	if err != nil {
 		return nil, err
 	}
@@ -257,7 +287,7 @@ func (b *etcdBroker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
 func (b *etcdBroker) GetDelayedTasks() ([]*tasks.Signature, error) {
 	key := "/machinery/v2/broker/delayed_tasks"

-	items, err := b.getTasks(context.Background(), key)
+	items, err := b.getTasks(b.ctx, key)
 	if err != nil {
 		return nil, err
 	}
@@ -266,33 +296,72 @@ func (b *etcdBroker) GetDelayedTasks() ([]*tasks.Signature, error) {
 }

 func (b *etcdBroker) nextTask(queue string) (Delivery, error) {
-	keyPrefix := fmt.Sprintf("/machinery/v2/broker/pending_tasks/%s", queue)
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
-	defer cancel()
-
-	item, err := getFirstItem(ctx, b.client, keyPrefix)
-	if err != nil {
-		return nil, err
-	}
-
-	signature := new(tasks.Signature)
-	decoder := json.NewDecoder(bytes.NewReader(item))
-	decoder.UseNumber()
-	if err := decoder.Decode(signature); err != nil {
-		return nil, errs.NewErrCouldNotUnmarshalTaskSignature(item, err)
-	}
-
-	d, err := NewDelivery(b.ctx, b.client, "")
-	if err != nil {
-		return nil, err
+	for k := range b.keyMap {
+		if !strings.Contains(k, queue) {
+			continue
+		}
+
+		d, err := NewDelivery(b.ctx, b.client, k)
+		if err != nil {
+			continue
+		}
+
+		return d, nil
 	}

-	return d, nil
+	time.Sleep(time.Second)
+	return b.nextTask(queue)
 }

-func (b *etcdBroker) handleDelayedTask() error {
+func (b *etcdBroker) listWatchTasks(ctx context.Context, queue string) error {
+	keyPrefix := fmt.Sprintf("/machinery/v2/broker/pending_tasks/%s", queue)
+
+	// List
+	listCtx, listCancel := context.WithTimeout(ctx, time.Second*5)
+	defer listCancel()
+	resp, err := b.client.Get(listCtx, keyPrefix, clientv3.WithPrefix(), clientv3.WithKeysOnly())
+	if err != nil {
+		return err
+	}
+
+	for _, kv := range resp.Kvs {
+		if bytes.Contains(kv.Key, []byte("/assign")) {
+			delete(b.keyMap, string(kv.Key)[:23])
+			continue
+		}
+		b.keyMap[string(kv.Key)] = struct{}{}
+	}
+
+	// Watch
+	watchCtx, watchCancel := context.WithTimeout(ctx, time.Minute*60)
+	defer watchCancel()
+	wc := b.client.Watch(watchCtx, keyPrefix, clientv3.WithPrefix(), clientv3.WithKeysOnly(), clientv3.WithRev(resp.Header.Revision))
+	for wresp := range wc {
+		for _, ev := range wresp.Events {
+			if ev.Type == clientv3.EventTypeDelete {
+				if bytes.Contains(ev.Kv.Key, []byte("/assign")) {
+					b.keyMap[string(ev.Kv.Key)] = struct{}{}
+				}
+				continue
+			}
+
+			if ev.Type == clientv3.EventTypePut {
+				if bytes.Contains(ev.Kv.Key, []byte("/assign")) {
+					delete(b.keyMap, string(ev.Kv.Key)[:23])
+					continue
+				}
+
+				b.keyMap[string(ev.Kv.Key)] = struct{}{}
+			}
+		}
+	}
+
+	return nil
+}
+
+func (b *etcdBroker) handleDelayedTask(ctx context.Context) error {
 	ttl := time.Second * 10
-	ctx, cancel := context.WithTimeout(b.ctx, ttl)
+	ctx, cancel := context.WithTimeout(ctx, ttl)
 	defer cancel()

 	// create a new session
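listWatchTasks treats keys containing /assign as claim markers: a put of <taskKey>/assign is expected to remove the task from keyMap, and a delete of the marker to make it visible again. Note that string(kv.Key)[:23] keeps the first 23 bytes of the marker ("/machinery/v2/broker/pe"), which is never a full task key, so the delete likely misses; trimming the suffix is presumably what is intended. A sketch with a hypothetical helper (taskKeyFromAssign is not part of the commit):

```go
package main

import (
	"fmt"
	"strings"
)

// taskKeyFromAssign is a hypothetical helper: given a claim marker such as
// ".../pending_tasks/<queue>/<uuid>/assign", it recovers the task key the
// marker refers to by trimming the "/assign" suffix.
func taskKeyFromAssign(assignKey string) string {
	return strings.TrimSuffix(assignKey, "/assign")
}

func main() {
	marker := "/machinery/v2/broker/pending_tasks/default/some-task-uuid/assign"
	fmt.Println(taskKeyFromAssign(marker))
	// /machinery/v2/broker/pending_tasks/default/some-task-uuid

	// For comparison, the slice used in the commit keeps only the first
	// 23 bytes of the marker:
	fmt.Println(marker[:23]) // /machinery/v2/broker/pe
}
```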
@@ -321,13 +390,13 @@ func (b *etcdBroker) handleDelayedTask() error {
 	for _, kv := range kvs.Kvs {
 		key := string(kv.Key)
 		parts := strings.Split(key, "/")
-		if len(parts) != 7 {
+		if len(parts) != 8 {
 			log.WARNING.Printf("invalid delay task %s, continue", key)
 			continue
 		}
 		cmp := clientv3.Compare(clientv3.ModRevision(key), "=", kv.ModRevision)
 		deleteReq := clientv3.OpDelete(key)
-		pendingKey := fmt.Sprintf("/machinery/v2/broker/pending_tasks/%s/%s", parts[5], parts[6])
+		pendingKey := fmt.Sprintf("/machinery/v2/broker/pending_tasks/%s/%s", parts[6], parts[7])
 		putReq := clientv3.OpPut(pendingKey, string(kv.Value))
 		c, err := b.client.Txn(b.ctx).If(cmp).Then(deleteReq, putReq).Commit()
 		if err != nil {
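The two changed lines in this hunk follow from the key layout written by Publish: "/machinery/v2/broker/delayed_tasks/eta-<ms>/<routingKey>/<uuid>" splits on "/" into 8 parts, because the leading slash makes parts[0] the empty string, putting the routing key and UUID at indices 6 and 7. The surrounding transaction then moves the task atomically: the delete of the delayed key and the put of the pending key commit only if the key's ModRevision is unchanged, so two brokers racing on the same task cannot both move it. A quick check of the index arithmetic, with hypothetical values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical delayed-task key in the format written by Publish.
	key := "/machinery/v2/broker/delayed_tasks/eta-1700000000000/machinery_tasks/0b1c9f44"

	parts := strings.Split(key, "/")
	fmt.Println(len(parts)) // 8 — the leading "/" yields an empty parts[0]

	// The routing key and task UUID sit at indices 6 and 7, matching the fix.
	pendingKey := fmt.Sprintf("/machinery/v2/broker/pending_tasks/%s/%s", parts[6], parts[7])
	fmt.Println(pendingKey)
	// /machinery/v2/broker/pending_tasks/machinery_tasks/0b1c9f44
}
```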