parallelize scheduler better

Andre Medeiros 2022-06-04 10:21:29 -04:00
parent b29f120eea
commit b23a158471

@@ -7,6 +7,7 @@ import (
 "net/http"
 _ "net/http/pprof"
 "strconv"
+"sync"
 "time"

 "github.com/DataDog/datadog-go/statsd"
@@ -92,7 +93,7 @@ func SchedulerCmd(ctx context.Context) *cobra.Command {
 }
 s := gocron.NewScheduler(time.UTC)
-_, _ = s.Every(500).Milliseconds().Do(func() { enqueueAccounts(ctx, logger, statsd, db, redis, luaSha, notifQueue) })
+_, _ = s.Every(500).Milliseconds().SingletonMode().Do(func() { enqueueAccounts(ctx, logger, statsd, db, redis, luaSha, notifQueue) })
 _, _ = s.Every(5).Second().Do(func() { enqueueSubreddits(ctx, logger, statsd, db, []rmq.Queue{subredditQueue, trendingQueue}) })
 _, _ = s.Every(5).Second().Do(func() { enqueueUsers(ctx, logger, statsd, db, userQueue) })
 _, _ = s.Every(5).Second().Do(func() { cleanQueues(logger, queue) })
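
The functional change in this hunk is the SingletonMode() call on the 500ms account job. Below is a minimal, self-contained sketch of what that buys, assuming the go-co-op/gocron v1 API that gocron.NewScheduler(time.UTC) suggests; the job body and durations are placeholders, not project code:

```go
package main

import (
	"log"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// SingletonMode keeps a new run of this job from starting while the
	// previous run is still in flight, so a slow pass cannot stack up
	// behind the 500ms tick.
	_, err := s.Every(500).Milliseconds().SingletonMode().Do(func() {
		log.Println("enqueue pass started")
		time.Sleep(2 * time.Second) // stand-in for a slow enqueue pass
	})
	if err != nil {
		log.Fatal(err)
	}

	s.StartAsync()
	time.Sleep(10 * time.Second)
	s.Stop()
}
```

Without SingletonMode, a pass that takes longer than its interval can overlap with the next tick, which is presumably why the account job gets it once the batches run concurrently.
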
@@ -441,13 +442,20 @@ func enqueueAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Cli
 logger.Debug("enqueueing account batch", zap.Int("count", len(ids)), zap.Time("start", now))
+batches := (idslen / batchSize) + 1
+wg := sync.WaitGroup{}
+wg.Add(batches)
 // Split ids in batches
 for i := 0; i < idslen; i += batchSize {
-j := i + batchSize
+go func(offset int) {
+defer wg.Done()
+j := offset + batchSize
 if j > idslen {
 j = idslen
 }
-batch := Int64Slice(ids[i:j])
+batch := Int64Slice(ids[offset:j])
 logger.Debug("enqueueing batch", zap.Int("len", len(batch)))
@@ -461,7 +469,7 @@ func enqueueAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Cli
 enqueued += len(vals)
 if len(vals) == 0 {
-continue
+return
 }
 batchIds := make([]string, len(vals))
@@ -472,7 +480,9 @@ func enqueueAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Cli
 if err = queue.Publish(batchIds...); err != nil {
 logger.Error("failed to enqueue account batch", zap.Error(err))
 }
+}(i * batchSize)
 }
+wg.Wait()
 logger.Debug("done enqueueing account batch", zap.Int("count", enqueued), zap.Int("skipped", skipped), zap.Time("start", now))
 }
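
Taken together, the enqueueAccounts hunks turn the batch loop body into one goroutine per batch, track the goroutines with a sync.WaitGroup, and block on wg.Wait() before the final log line; inside the goroutine the old continue correspondingly becomes a return. A self-contained sketch of that pattern follows. The names enqueueAll and enqueueBatch are illustrative, it calls wg.Add(1) per spawned goroutine instead of precomputing (idslen / batchSize) + 1 (which over-counts by one when idslen is an exact multiple of batchSize), and it passes the loop index straight through as the offset, since the loop already steps by batchSize:

```go
package main

import (
	"fmt"
	"sync"
)

// enqueueAll splits ids into batchSize-sized batches and hands each batch to
// enqueueBatch from its own goroutine, waiting for all of them to finish.
func enqueueAll(ids []int64, batchSize int, enqueueBatch func([]int64)) {
	idslen := len(ids)
	var wg sync.WaitGroup

	for i := 0; i < idslen; i += batchSize {
		wg.Add(1) // one WaitGroup slot per goroutine actually spawned

		go func(offset int) {
			defer wg.Done()

			j := offset + batchSize
			if j > idslen {
				j = idslen // clamp the last, possibly short, batch
			}
			enqueueBatch(ids[offset:j])
		}(i) // i already advances by batchSize, so it is the batch offset
	}

	wg.Wait()
	fmt.Println("done enqueueing", idslen, "ids")
}

func main() {
	ids := []int64{1, 2, 3, 4, 5, 6, 7}
	enqueueAll(ids, 3, func(batch []int64) {
		fmt.Println("enqueued batch", batch)
	})
}
```

Note that the real function also accumulates the enqueued and skipped counters that appear in the final log line; once those updates happen inside goroutines they would need a mutex or atomic operations to stay race-free.
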