Mirror of https://github.com/christianselig/apollo-backend
Synced 2024-11-22 11:47:42 +00:00
do batches in redis
This commit is contained in:
parent b7aea89cfc
commit 3ce858927b
2 changed files with 58 additions and 26 deletions
@@ -2,6 +2,7 @@ package main
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"log"
 	"os"
@@ -19,6 +20,10 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+const (
+	batchSize = 100
+)
+
 func main() {
 	_ = godotenv.Load()
 
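The new batchSize constant drives the chunking loop introduced in the next hunk. As a rough standalone illustration (not repository code; the ID list here is hypothetical), walking a slice of IDs in windows of at most batchSize elements looks like this:

package main

import "fmt"

const batchSize = 100

func main() {
	// Hypothetical ID list; the scheduler presumably loads these from the database.
	ids := make([]int64, 250)

	// Same bounds-clamping as the loop added to enqueueAccounts below.
	for i := 0; i < len(ids); i += batchSize {
		j := i + batchSize
		if j > len(ids) {
			j = len(ids)
		}
		fmt.Printf("batch %d..%d (%d ids)\n", i, j, len(ids[i:j]))
	}
}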
@@ -172,36 +177,56 @@ func enqueueAccounts(ctx context.Context, logger *logrus.Logger, statsd *statsd.
 	enqueued := 0
 	skipped := 0
 	failed := 0
-	for _, id := range ids {
-		payload := fmt.Sprintf("%d", id)
-		lockKey := fmt.Sprintf("locks:accounts:%s", payload)
-
-		_, err := redisConn.Get(ctx, lockKey).Result()
-		if err == nil {
-			skipped++
-			continue
-		} else if err != redis.Nil {
-			logger.WithFields(logrus.Fields{
-				"lockKey": lockKey,
-				"err":     err,
-			}).Error("failed to check for account lock")
-		}
-
-		if err := redisConn.SetEX(ctx, lockKey, true, 60*time.Second).Err(); err != nil {
-			logger.WithFields(logrus.Fields{
-				"lockKey": lockKey,
-				"err":     err,
-			}).Error("failed to lock account")
-		}
-
-		if err = queue.Publish(payload); err != nil {
-			logger.WithFields(logrus.Fields{
-				"accountID": payload,
-				"err":       err,
-			}).Error("failed to enqueue account")
-			failed++
-		} else {
-			enqueued++
+
+	// Split ids in batches
+	for i := 0; i < len(ids); i += batchSize {
+		j := i + batchSize
+		if j > len(ids) {
+			j = len(ids)
+		}
+		batch := Int64Slice(ids[i:j])
+
+		logger.WithFields(logrus.Fields{
+			"len": len(batch),
+		}).Debug("enqueueing batch")
+
+		lua := `
+			local retv={}
+			local ids=cjson.decode(ARGV[1])
+
+			for i=1, #ids do
+				local key = "locks:accounts:" .. ids[i]
+				if redis.call("exists", key) == 0 then
+					retv[#retv + 1] = ids[i]
+				end
+				redis.call("setex", key, 60, 1)
+			end
+
+			return retv
+		`
+
+		res, err := redisConn.Eval(ctx, lua, []string{}, batch).Result()
+		if err != nil {
+			logger.WithFields(logrus.Fields{
+				"err": err,
+			}).Error("failed to check for locked accounts")
+		}
+
+		vals := res.([]interface{})
+		enqueued += len(vals)
+		skipped += len(batch) - len(vals)
+
+		for _, val := range vals {
+			id := val.(int64)
+			payload := fmt.Sprintf("%d", id)
+			if err = queue.Publish(payload); err != nil {
+				logger.WithFields(logrus.Fields{
+					"accountID": payload,
+					"err":       err,
+				}).Error("failed to enqueue account")
+				failed++
+			}
 		}
 	}
 
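Lifted out of the diff, the new lock-and-enqueue flow can be sketched as a standalone program. This is a sketch, not repository code: it assumes go-redis v8 (matching the ctx-first Eval call above) and a Redis at localhost:6379; the helper name lockUnlocked is illustrative, and Int64Slice mirrors the marshaller added in the next hunk.

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// Int64Slice mirrors the type added later in this commit: go-redis serialises
// script arguments via encoding.BinaryMarshaler, so the batch reaches the Lua
// script as a single JSON-encoded ARGV[1].
type Int64Slice []int64

func (ii Int64Slice) MarshalBinary() ([]byte, error) {
	return json.Marshal(ii)
}

// lockUnlocked issues one EVAL per batch. The script returns only the IDs that
// were not already locked and refreshes a 60-second lock on every ID it sees.
func lockUnlocked(ctx context.Context, rdb *redis.Client, batch Int64Slice) ([]int64, error) {
	lua := `
		local retv={}
		local ids=cjson.decode(ARGV[1])
		for i=1, #ids do
			local key = "locks:accounts:" .. ids[i]
			if redis.call("exists", key) == 0 then
				retv[#retv + 1] = ids[i]
			end
			redis.call("setex", key, 60, 1)
		end
		return retv
	`

	res, err := rdb.Eval(ctx, lua, []string{}, batch).Result()
	if err != nil {
		return nil, err
	}

	// Redis returns the Lua array as []interface{} of int64 values.
	vals := res.([]interface{})
	ids := make([]int64, 0, len(vals))
	for _, v := range vals {
		ids = append(ids, v.(int64))
	}
	return ids, nil
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	free, err := lockUnlocked(ctx, rdb, Int64Slice{1, 2, 3})
	if err != nil {
		panic(err)
	}
	fmt.Println("not previously locked:", free)
}

Compared with the old per-ID GET and SETEX round trips, one EVAL per batch of up to 100 IDs performs both the existence check and the 60-second lock refresh server-side, so each batch costs a single round trip.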
@@ -224,3 +249,10 @@ func logErrors(errChan <-chan error) {
 		log.Print("error: ", err)
 	}
 }
+
+type Int64Slice []int64
+
+func (ii Int64Slice) MarshalBinary() (data []byte, err error) {
+	bytes, err := json.Marshal(ii)
+	return bytes, err
+}
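go-redis serialises non-primitive script arguments through encoding.BinaryMarshaler, which is why the plain []int64 batch is wrapped in Int64Slice: MarshalBinary emits a JSON array, exactly what the Lua script's cjson.decode(ARGV[1]) expects. A tiny standalone check (illustrative, not repository code):

package main

import (
	"encoding/json"
	"fmt"
)

type Int64Slice []int64

func (ii Int64Slice) MarshalBinary() (data []byte, err error) {
	return json.Marshal(ii)
}

func main() {
	// This is the byte string go-redis passes as ARGV[1] when the slice is
	// supplied as a script argument.
	b, _ := Int64Slice{10, 20, 30}.MarshalBinary()
	fmt.Println(string(b)) // prints [10,20,30]
}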
@@ -404,7 +404,7 @@ func (c *Consumer) Consume(delivery rmq.Delivery) {
 				"accountID":  delivery.Payload(),
 				"token":      device.APNSToken,
 				"redditUser": account.Username,
-			}).Debug("sent notification")
+			}).Info("sent notification")
 		}
 	}
 }