apollo-backend/internal/cmd/scheduler.go

492 lines
12 KiB
Go
Raw Normal View History

package cmd
2021-07-08 23:03:46 +00:00
import (
"context"
2021-07-09 03:12:50 +00:00
"encoding/json"
2021-07-08 23:03:46 +00:00
"fmt"
2022-05-27 17:27:19 +00:00
"net/http"
_ "net/http/pprof"
"strconv"
2022-06-04 14:21:29 +00:00
"sync"
2021-07-08 23:03:46 +00:00
"time"
2021-07-09 02:09:14 +00:00
"github.com/DataDog/datadog-go/statsd"
2021-07-08 23:03:46 +00:00
"github.com/adjust/rmq/v4"
"github.com/go-co-op/gocron"
"github.com/go-redis/redis/v8"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/spf13/cobra"
2022-05-23 18:17:25 +00:00
"go.uber.org/zap"
2021-08-14 16:08:17 +00:00
2022-03-28 21:05:01 +00:00
"github.com/christianselig/apollo-backend/internal/cmdutil"
"github.com/christianselig/apollo-backend/internal/domain"
"github.com/christianselig/apollo-backend/internal/repository"
2021-07-09 03:12:50 +00:00
)
2022-05-23 22:51:30 +00:00
const (
2022-05-27 20:12:50 +00:00
batchSize = 1000
maxNotificationChecks = 5000
2022-05-23 22:51:30 +00:00
)
2022-03-28 21:05:01 +00:00
func SchedulerCmd(ctx context.Context) *cobra.Command {
cmd := &cobra.Command{
Use: "scheduler",
Args: cobra.ExactArgs(0),
Short: "Schedules jobs and runs several maintenance tasks periodically.",
RunE: func(cmd *cobra.Command, args []string) error {
2022-05-23 18:29:15 +00:00
logger := cmdutil.NewLogger("scheduler")
2022-05-23 18:17:25 +00:00
defer func() { _ = logger.Sync() }()
statsd, err := cmdutil.NewStatsdClient()
if err != nil {
return err
}
defer statsd.Close()
2021-07-20 17:00:53 +00:00
db, err := cmdutil.NewDatabasePool(ctx, 1)
if err != nil {
return err
}
defer db.Close()
redis, err := cmdutil.NewRedisClient(ctx)
if err != nil {
return err
}
defer redis.Close()
queue, err := cmdutil.NewQueueClient(logger, redis, "worker")
if err != nil {
return err
}
// Eval lua so that we don't keep parsing it
luaSha, err := evalScript(ctx, redis)
if err != nil {
return err
}
notifQueue, err := queue.OpenQueue("notifications")
if err != nil {
return err
}
2021-09-25 16:56:01 +00:00
subredditQueue, err := queue.OpenQueue("subreddits")
if err != nil {
return err
}
2021-10-10 15:51:42 +00:00
trendingQueue, err := queue.OpenQueue("trending")
if err != nil {
return err
}
2021-10-09 14:59:20 +00:00
userQueue, err := queue.OpenQueue("users")
if err != nil {
return err
}
2021-10-17 14:17:41 +00:00
stuckNotificationsQueue, err := queue.OpenQueue("stuck-notifications")
if err != nil {
return err
}
s := gocron.NewScheduler(time.UTC)
_, _ = s.Every(500).Milliseconds().Do(func() { enqueueAccounts(ctx, logger, statsd, db, redis, luaSha, notifQueue) })
2022-05-27 18:40:40 +00:00
_, _ = s.Every(5).Second().Do(func() { enqueueSubreddits(ctx, logger, statsd, db, []rmq.Queue{subredditQueue, trendingQueue}) })
_, _ = s.Every(5).Second().Do(func() { enqueueUsers(ctx, logger, statsd, db, userQueue) })
_, _ = s.Every(5).Second().Do(func() { cleanQueues(logger, queue) })
_, _ = s.Every(5).Second().Do(func() { enqueueStuckAccounts(ctx, logger, statsd, db, stuckNotificationsQueue) })
_, _ = s.Every(1).Minute().Do(func() { reportStats(ctx, logger, statsd, db) })
2021-09-25 13:19:42 +00:00
_, _ = s.Every(1).Minute().Do(func() { pruneAccounts(ctx, logger, db) })
_, _ = s.Every(1).Minute().Do(func() { pruneDevices(ctx, logger, db) })
s.StartAsync()
2022-05-27 17:27:19 +00:00
srv := &http.Server{Addr: ":8080"}
go func() { _ = srv.ListenAndServe() }()
<-ctx.Done()
s.Stop()
return nil
},
2021-07-08 23:03:46 +00:00
}
return cmd
2021-07-08 23:03:46 +00:00
}
// evalScript loads the account-lock Lua script into Redis and returns its
// SHA1 digest so callers can invoke it cheaply with EVALSHA.
//
// The script takes a lock-key prefix in KEYS[1] and a JSON array of IDs in
// ARGV[1]. For every ID whose lock key does not yet exist it creates the
// lock (SETEX with the notification-check timeout as TTL) and includes the
// ID in the returned array; already-locked IDs are filtered out.
func evalScript(ctx context.Context, redis *redis.Client) (string, error) {
	script := fmt.Sprintf(`
		local retv={}
		local ids=cjson.decode(ARGV[1])
		for i=1, #ids do
			local key = KEYS[1] .. ":" .. ids[i]
			if redis.call("exists", key) == 0 then
				redis.call("setex", key, %.0f, 1)
				retv[#retv + 1] = ids[i]
			end
		end
		return retv
	`, domain.NotificationCheckTimeout.Seconds())

	return redis.ScriptLoad(ctx, script).Result()
}
2022-05-23 18:17:25 +00:00
func pruneAccounts(ctx context.Context, logger *zap.Logger, pool *pgxpool.Pool) {
2022-03-28 21:05:01 +00:00
expiry := time.Now().Add(-domain.StaleTokenThreshold)
2021-08-14 15:54:48 +00:00
ar := repository.NewPostgresAccount(pool)
2022-03-28 21:05:01 +00:00
stale, err := ar.PruneStale(ctx, expiry)
2021-08-14 15:54:48 +00:00
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to clean stale accounts", zap.Error(err))
2021-08-14 15:54:48 +00:00
return
}
2021-08-14 15:59:13 +00:00
orphaned, err := ar.PruneOrphaned(ctx)
2021-07-12 19:36:22 +00:00
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to clean orphaned accounts", zap.Error(err))
2021-07-12 19:36:22 +00:00
return
}
if count := stale + orphaned; count > 0 {
2022-05-23 18:17:25 +00:00
logger.Info("pruned accounts", zap.Int64("stale", stale), zap.Int64("orphaned", orphaned))
2021-07-23 00:22:46 +00:00
}
2021-07-12 19:36:22 +00:00
}
2022-05-23 18:17:25 +00:00
func pruneDevices(ctx context.Context, logger *zap.Logger, pool *pgxpool.Pool) {
2022-03-28 21:05:01 +00:00
now := time.Now()
2021-08-14 16:08:17 +00:00
dr := repository.NewPostgresDevice(pool)
2022-03-28 21:05:01 +00:00
count, err := dr.PruneStale(ctx, now)
2021-08-14 16:08:17 +00:00
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to clean stale devices", zap.Error(err))
2021-08-14 16:08:17 +00:00
return
}
if count > 0 {
2022-05-23 18:17:25 +00:00
logger.Info("pruned devices", zap.Int64("count", count))
2021-08-14 16:08:17 +00:00
}
}
2022-05-23 18:17:25 +00:00
func cleanQueues(logger *zap.Logger, jobsConn rmq.Connection) {
2021-07-12 19:36:22 +00:00
cleaner := rmq.NewCleaner(jobsConn)
count, err := cleaner.Clean()
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to clean jobs from queues", zap.Error(err))
2021-07-12 19:36:22 +00:00
return
}
2021-10-17 14:17:41 +00:00
if count > 0 {
2022-05-23 18:17:25 +00:00
logger.Info("returned jobs to queues", zap.Int64("count", count))
2021-10-17 14:17:41 +00:00
}
2021-07-12 19:36:22 +00:00
}
2022-05-23 18:17:25 +00:00
func reportStats(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool) {
var (
count int64
metrics = []struct {
query string
name string
}{
{"SELECT COUNT(*) FROM accounts", "apollo.registrations.accounts"},
{"SELECT COUNT(*) FROM devices", "apollo.registrations.devices"},
2021-10-17 14:17:41 +00:00
{"SELECT COUNT(*) FROM subreddits", "apollo.registrations.subreddits"},
{"SELECT COUNT(*) FROM users", "apollo.registrations.users"},
}
)
for _, metric := range metrics {
2021-09-25 13:19:42 +00:00
_ = pool.QueryRow(ctx, metric.query).Scan(&count)
_ = statsd.Gauge(metric.name, float64(count), []string{}, 1)
2022-05-23 18:17:25 +00:00
logger.Debug("fetched metrics", zap.String("metric", metric.name), zap.Int64("count", count))
}
}
2022-05-23 18:17:25 +00:00
func enqueueUsers(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queue rmq.Queue) {
2021-10-09 14:59:20 +00:00
now := time.Now()
2022-03-28 21:05:01 +00:00
next := now.Add(domain.NotificationCheckInterval)
ids := []int64{}
2021-10-09 14:59:20 +00:00
2021-10-17 16:04:09 +00:00
defer func() {
tags := []string{"queue:users"}
_ = statsd.Histogram("apollo.queue.enqueued", float64(len(ids)), tags, 1)
_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
}()
2021-10-09 14:59:20 +00:00
err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
stmt := `
2022-05-21 21:15:24 +00:00
UPDATE users
SET next_check_at = $2
WHERE id IN (
SELECT id
2021-10-09 14:59:20 +00:00
FROM users
2022-03-28 21:05:01 +00:00
WHERE next_check_at < $1
ORDER BY next_check_at
2022-05-21 21:15:24 +00:00
FOR UPDATE SKIP LOCKED
2021-10-09 14:59:20 +00:00
LIMIT 100
)
RETURNING users.id`
2022-03-28 21:05:01 +00:00
rows, err := tx.Query(ctx, stmt, now, next)
2021-10-09 14:59:20 +00:00
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
var id int64
2021-10-09 14:59:20 +00:00
_ = rows.Scan(&id)
ids = append(ids, id)
2021-10-09 14:59:20 +00:00
}
return nil
})
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to fetch batch of users", zap.Error(err))
2021-10-09 14:59:20 +00:00
return
}
if len(ids) == 0 {
return
}
2022-05-23 18:17:25 +00:00
logger.Debug("enqueueing user batch", zap.Int("count", len(ids)), zap.Time("start", now))
2021-10-09 14:59:20 +00:00
batchIds := make([]string, len(ids))
for i, id := range ids {
batchIds[i] = strconv.FormatInt(id, 10)
}
if err = queue.Publish(batchIds...); err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to enqueue user batch", zap.Error(err))
2021-10-09 14:59:20 +00:00
}
}
2022-05-23 18:17:25 +00:00
func enqueueSubreddits(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queues []rmq.Queue) {
2021-09-25 16:56:01 +00:00
now := time.Now()
2022-03-28 21:05:01 +00:00
next := now.Add(domain.SubredditCheckInterval)
2021-09-25 16:56:01 +00:00
ids := []int64{}
2021-10-17 16:04:09 +00:00
defer func() {
tags := []string{"queue:subreddits"}
_ = statsd.Histogram("apollo.queue.enqueued", float64(len(ids)), tags, 1)
_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
}()
2021-09-25 16:56:01 +00:00
err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
stmt := `
2022-05-21 21:15:24 +00:00
UPDATE subreddits
SET next_check_at = $2
WHERE subreddits.id IN(
SELECT id
2021-09-25 16:56:01 +00:00
FROM subreddits
2022-03-28 21:05:01 +00:00
WHERE next_check_at < $1
ORDER BY next_check_at
2022-05-21 21:15:24 +00:00
FOR UPDATE SKIP LOCKED
2021-09-25 16:56:01 +00:00
LIMIT 100
)
RETURNING subreddits.id`
2022-03-28 21:05:01 +00:00
rows, err := tx.Query(ctx, stmt, now, next)
2021-09-25 16:56:01 +00:00
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
var id int64
_ = rows.Scan(&id)
ids = append(ids, id)
}
return nil
})
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to fetch batch of subreddits", zap.Error(err))
2021-09-25 16:56:01 +00:00
return
}
if len(ids) == 0 {
return
}
2022-05-23 18:17:25 +00:00
logger.Debug("enqueueing subreddit batch", zap.Int("count", len(ids)), zap.Time("start", now))
2021-09-25 16:56:01 +00:00
batchIds := make([]string, len(ids))
for i, id := range ids {
batchIds[i] = strconv.FormatInt(id, 10)
}
2021-10-10 15:51:42 +00:00
for _, queue := range queues {
if err = queue.Publish(batchIds...); err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to enqueue subreddit batch", zap.Error(err))
2021-10-10 15:51:42 +00:00
}
2021-09-25 16:56:01 +00:00
}
}
2022-05-23 18:17:25 +00:00
func enqueueStuckAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queue rmq.Queue) {
2021-10-17 14:17:41 +00:00
now := time.Now()
2022-03-28 21:05:01 +00:00
next := now.Add(domain.StuckNotificationCheckInterval)
2021-10-17 14:17:41 +00:00
ids := []int64{}
2021-10-17 16:04:09 +00:00
defer func() {
tags := []string{"queue:stuck-accounts"}
_ = statsd.Histogram("apollo.queue.enqueued", float64(len(ids)), tags, 1)
_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
}()
2021-10-17 14:17:41 +00:00
err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
stmt := `
2022-05-21 21:15:24 +00:00
UPDATE accounts
SET next_stuck_notification_check_at = $2
WHERE accounts.id IN(
SELECT id
2021-10-17 14:17:41 +00:00
FROM accounts
2022-05-21 21:15:24 +00:00
WHERE next_stuck_notification_check_at < $1
2022-03-28 21:05:01 +00:00
ORDER BY next_stuck_notification_check_at
2022-05-21 21:15:24 +00:00
FOR UPDATE SKIP LOCKED
2021-10-17 14:17:41 +00:00
LIMIT 500
)
RETURNING accounts.id`
2022-03-28 21:05:01 +00:00
rows, err := tx.Query(ctx, stmt, now, next)
2021-10-17 14:17:41 +00:00
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
var id int64
_ = rows.Scan(&id)
ids = append(ids, id)
}
return nil
})
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to fetch accounts", zap.Error(err))
2021-10-17 14:17:41 +00:00
return
}
if len(ids) == 0 {
return
}
2022-05-23 18:17:25 +00:00
logger.Debug("enqueueing stuck account batch", zap.Int("count", len(ids)), zap.Time("start", now))
2021-07-09 02:15:28 +00:00
2021-10-17 14:17:41 +00:00
batchIds := make([]string, len(ids))
for i, id := range ids {
batchIds[i] = strconv.FormatInt(id, 10)
}
if err = queue.Publish(batchIds...); err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to enqueue stuck account batch", zap.Error(err))
2021-10-17 14:17:41 +00:00
}
}
2022-05-23 18:17:25 +00:00
func enqueueAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, redisConn *redis.Client, luaSha string, queue rmq.Queue) {
2021-10-17 14:17:41 +00:00
now := time.Now()
2022-03-28 21:05:01 +00:00
next := now.Add(domain.NotificationCheckInterval)
2022-07-13 15:28:25 +00:00
ids := make([]string, maxNotificationChecks)
2022-05-23 22:54:38 +00:00
idslen := 0
2021-10-17 16:04:09 +00:00
enqueued := 0
skipped := 0
defer func() {
tags := []string{"queue:notifications"}
_ = statsd.Histogram("apollo.queue.enqueued", float64(enqueued), tags, 1)
_ = statsd.Histogram("apollo.queue.skipped", float64(skipped), tags, 1)
_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
}()
2021-07-09 01:07:01 +00:00
2021-07-08 23:03:46 +00:00
err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
2022-05-23 22:51:30 +00:00
stmt := fmt.Sprintf(`
2022-05-21 21:15:24 +00:00
UPDATE accounts
SET next_notification_check_at = $2
WHERE accounts.id IN(
SELECT id
2021-07-08 23:03:46 +00:00
FROM accounts
2022-05-21 21:15:24 +00:00
WHERE next_notification_check_at < $1
2022-03-28 21:05:01 +00:00
ORDER BY next_notification_check_at
2022-05-21 21:15:24 +00:00
FOR UPDATE SKIP LOCKED
2022-05-23 22:51:30 +00:00
LIMIT %d
2021-07-08 23:03:46 +00:00
)
2022-07-13 15:28:25 +00:00
RETURNING accounts.account_id`, maxNotificationChecks)
2022-03-28 21:05:01 +00:00
rows, err := tx.Query(ctx, stmt, now, next)
2021-07-08 23:03:46 +00:00
if err != nil {
return err
}
defer rows.Close()
2022-05-23 22:51:30 +00:00
for i := 0; rows.Next(); i++ {
2022-07-13 15:28:25 +00:00
_ = rows.Scan(&ids[i])
2022-05-23 22:54:38 +00:00
idslen = i
2021-07-08 23:03:46 +00:00
}
return nil
})
if err != nil {
2022-05-23 18:17:25 +00:00
logger.Error("failed to fetch batch of accounts", zap.Error(err))
2021-07-08 23:03:46 +00:00
return
}
2022-05-23 22:54:38 +00:00
if idslen == 0 {
2021-10-17 14:17:41 +00:00
return
}
2022-05-23 18:17:25 +00:00
logger.Debug("enqueueing account batch", zap.Int("count", len(ids)), zap.Time("start", now))
2021-07-09 03:12:50 +00:00
// Split ids in batches
2022-06-04 14:24:22 +00:00
wg := sync.WaitGroup{}
2022-05-23 22:54:38 +00:00
for i := 0; i < idslen; i += batchSize {
2022-06-04 14:24:22 +00:00
wg.Add(1)
2022-06-04 14:21:29 +00:00
go func(offset int) {
defer wg.Done()
2021-07-09 03:12:50 +00:00
2022-06-04 14:21:29 +00:00
j := offset + batchSize
if j > idslen {
j = idslen
}
2022-07-13 15:28:25 +00:00
batch := ids[offset:j]
2021-07-09 03:12:50 +00:00
2022-06-04 14:21:29 +00:00
logger.Debug("enqueueing batch", zap.Int("len", len(batch)))
2021-07-09 00:26:01 +00:00
2022-06-04 14:21:29 +00:00
res, err := redisConn.EvalSha(ctx, luaSha, []string{"locks:accounts"}, batch).Result()
if err != nil {
logger.Error("failed to check for locked accounts", zap.Error(err))
}
2021-07-09 03:12:50 +00:00
2022-06-04 14:21:29 +00:00
vals := res.([]interface{})
skipped += len(batch) - len(vals)
enqueued += len(vals)
2022-06-04 14:21:29 +00:00
if len(vals) == 0 {
return
}
2022-06-04 14:21:29 +00:00
batchIds := make([]string, len(vals))
for k, v := range vals {
2022-07-13 15:28:25 +00:00
batchIds[k] = v.(string)
2022-06-04 14:21:29 +00:00
}
if err = queue.Publish(batchIds...); err != nil {
logger.Error("failed to enqueue account batch", zap.Error(err))
}
2022-06-04 14:23:45 +00:00
}(i)
2021-07-08 23:03:46 +00:00
}
2022-06-04 14:21:29 +00:00
wg.Wait()
2021-07-08 23:03:46 +00:00
2022-05-23 18:17:25 +00:00
logger.Debug("done enqueueing account batch", zap.Int("count", enqueued), zap.Int("skipped", skipped), zap.Time("start", now))
2021-07-08 23:03:46 +00:00
}
2021-07-09 03:12:50 +00:00
// Int64Slice is a slice of int64 values that satisfies
// encoding.BinaryMarshaler (as required by the Redis client for argument
// values) by encoding itself as a JSON array.
type Int64Slice []int64

// MarshalBinary encodes the slice as JSON.
func (ii Int64Slice) MarshalBinary() ([]byte, error) {
	return json.Marshal(ii)
}