mirror of https://github.com/christianselig/apollo-backend
synced 2024-11-14 16:07:42 +00:00
remove some load
This commit is contained in:
parent 3b08d6b8e4
commit f4fea41435
2 changed files with 83 additions and 102 deletions
File 1 of 2 (filename not captured): the enqueue* scheduler functions.

@@ -14,7 +14,6 @@ import (
 	"github.com/adjust/rmq/v4"
 	"github.com/go-co-op/gocron"
 	"github.com/go-redis/redis/v8"
-	"github.com/jackc/pgx/v4"
 	"github.com/jackc/pgx/v4/pgxpool"
 	"github.com/spf13/cobra"
 	"go.uber.org/zap"
@@ -219,7 +218,6 @@ func enqueueUsers(ctx context.Context, logger *zap.Logger, statsd *statsd.Client
 		_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
 	}()
 
-	err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
 		stmt := `
 			UPDATE users
 			SET next_check_at = $2
@@ -232,23 +230,17 @@ func enqueueUsers(ctx context.Context, logger *zap.Logger, statsd *statsd.Client
 			LIMIT 100
 		)
 		RETURNING users.id`
-		rows, err := tx.Query(ctx, stmt, now, next)
+		rows, err := pool.Query(ctx, stmt, now, next)
 		if err != nil {
-			return err
+			logger.Error("failed to fetch batch of users", zap.Error(err))
+			return
 		}
-		defer rows.Close()
 		for rows.Next() {
 			var id int64
 			_ = rows.Scan(&id)
 			ids = append(ids, id)
 		}
-		return nil
-	})
-
-	if err != nil {
-		logger.Error("failed to fetch batch of users", zap.Error(err))
-		return
-	}
+		rows.Close()
 
 	if len(ids) == 0 {
 		return
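The two hunks above drop the pool.BeginFunc / pgx.Tx wrapper around enqueueUsers' batch query: the UPDATE ... RETURNING statement now runs directly against the pgxpool.Pool and auto-commits, the error is logged at the call site instead of being returned from a closure, and rows.Close() is called explicitly rather than deferred. The enqueueSubreddits and enqueueStuckAccounts hunks below make the same change. A minimal sketch of the resulting pattern, assuming pgx v4 and zap; the middle of the SQL statement falls outside the captured hunks, so the inner WHERE clause, the package name, and the helper function here are assumptions for illustration:

// Sketch only: the non-transactional batch claim this commit moves to.
// pool, logger, and the inner WHERE clause are stand-ins; only the overall
// shape mirrors the diff.
package scheduler

import (
	"context"
	"time"

	"github.com/jackc/pgx/v4/pgxpool"
	"go.uber.org/zap"
)

func claimDueUsers(ctx context.Context, pool *pgxpool.Pool, logger *zap.Logger, now, next time.Time) []int64 {
	stmt := `
		UPDATE users
		SET next_check_at = $2
		WHERE users.id IN (
			SELECT id
			FROM users
			WHERE next_check_at < $1
			LIMIT 100
		)
		RETURNING users.id`

	// A single UPDATE ... RETURNING both claims the batch and returns the ids,
	// so the statement auto-commits and no explicit transaction is needed.
	rows, err := pool.Query(ctx, stmt, now, next)
	if err != nil {
		logger.Error("failed to fetch batch of users", zap.Error(err))
		return nil
	}

	var ids []int64
	for rows.Next() {
		var id int64
		_ = rows.Scan(&id)
		ids = append(ids, id)
	}
	// Closed explicitly once the batch is drained; no defer inside a closure.
	rows.Close()
	return ids
}

The batch is still claimed atomically by the single UPDATE; what goes away is the BEGIN/COMMIT round trip that pool.BeginFunc added around every scheduler tick, which is presumably the load the commit title refers to.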
@@ -278,7 +270,6 @@ func enqueueSubreddits(ctx context.Context, logger *zap.Logger, statsd *statsd.C
 		_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
 	}()
 
-	err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
 		stmt := `
 			UPDATE subreddits
 			SET next_check_at = $2
@@ -291,23 +282,17 @@ func enqueueSubreddits(ctx context.Context, logger *zap.Logger, statsd *statsd.C
 			LIMIT 100
 		)
 		RETURNING subreddits.id`
-		rows, err := tx.Query(ctx, stmt, now, next)
+		rows, err := pool.Query(ctx, stmt, now, next)
 		if err != nil {
-			return err
+			logger.Error("failed to fetch batch of subreddits", zap.Error(err))
+			return
 		}
-		defer rows.Close()
 		for rows.Next() {
 			var id int64
 			_ = rows.Scan(&id)
 			ids = append(ids, id)
 		}
-		return nil
-	})
-
-	if err != nil {
-		logger.Error("failed to fetch batch of subreddits", zap.Error(err))
-		return
-	}
+		rows.Close()
 
 	if len(ids) == 0 {
 		return
@@ -340,7 +325,6 @@ func enqueueStuckAccounts(ctx context.Context, logger *zap.Logger, statsd *stats
 		_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
 	}()
 
-	err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
 		stmt := `
 			UPDATE accounts
 			SET next_stuck_notification_check_at = $2
@@ -353,23 +337,18 @@ func enqueueStuckAccounts(ctx context.Context, logger *zap.Logger, statsd *stats
 			LIMIT 500
 		)
 		RETURNING accounts.id`
-		rows, err := tx.Query(ctx, stmt, now, next)
+		rows, err := pool.Query(ctx, stmt, now, next)
 		if err != nil {
-			return err
+			logger.Error("failed to fetch accounts", zap.Error(err))
+			return
 		}
-		defer rows.Close()
 		for rows.Next() {
 			var id int64
 			_ = rows.Scan(&id)
 			ids = append(ids, id)
 		}
-		return nil
-	})
+		rows.Close()
 
-	if err != nil {
-		logger.Error("failed to fetch accounts", zap.Error(err))
-		return
-	}
 
 	if len(ids) == 0 {
 		return
@@ -403,7 +382,6 @@ func enqueueAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Cli
 		_ = statsd.Histogram("apollo.queue.runtime", float64(time.Since(now).Milliseconds()), tags, 1)
 	}()
 
-	err := pool.BeginFunc(ctx, func(tx pgx.Tx) error {
 		stmt := fmt.Sprintf(`
 			UPDATE accounts
 			SET next_notification_check_at = $2
@@ -416,22 +394,16 @@ func enqueueAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Cli
 			LIMIT %d
 		)
 		RETURNING accounts.reddit_account_id`, maxNotificationChecks)
-		rows, err := tx.Query(ctx, stmt, now, next)
-		if err != nil {
-			return err
-		}
-		defer rows.Close()
-		for i := 0; rows.Next(); i++ {
-			_ = rows.Scan(&ids[i])
-			idslen = i
-		}
-		return nil
-	})
-
+	rows, err := pool.Query(ctx, stmt, now, next)
 	if err != nil {
 		logger.Error("failed to fetch batch of accounts", zap.Error(err))
 		return
 	}
+	for i := 0; rows.Next(); i++ {
+		_ = rows.Scan(&ids[i])
+		idslen = i
+	}
+	rows.Close()
 
 	if idslen == 0 {
 		return
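enqueueAccounts gets the same treatment but handles results differently from the other three functions: it scans into a preallocated buffer indexed by the loop counter and tracks idslen instead of appending. The declarations of ids and idslen sit outside the captured hunks, so the sketch below assumes a slice sized by maxNotificationChecks and a string reddit_account_id; the buffer size, element type, and helper name are illustrative only:

// Sketch only: the post-change result handling in enqueueAccounts.
// Buffer size, element type, and the helper itself are assumptions; the
// loop body and the explicit rows.Close() mirror the diff.
package scheduler

import (
	"context"
	"time"

	"github.com/jackc/pgx/v4/pgxpool"
)

func collectAccountIDs(ctx context.Context, pool *pgxpool.Pool, stmt string, now, next time.Time) ([]string, int, error) {
	const maxNotificationChecks = 2000 // assumed; only the identifier appears in the diff

	ids := make([]string, maxNotificationChecks)
	idslen := 0

	rows, err := pool.Query(ctx, stmt, now, next)
	if err != nil {
		return nil, 0, err
	}
	for i := 0; rows.Next(); i++ {
		_ = rows.Scan(&ids[i]) // fill the preallocated buffer instead of appending
		idslen = i             // index of the last scanned row, as written in the diff
	}
	rows.Close()
	return ids, idslen, nil
}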
File 2 of 2 (filename not captured): the Reddit API client (AuthenticatedClient).

@@ -356,10 +356,14 @@ func (rac *AuthenticatedClient) logRequest() error {
 		return nil
 	}
 
-	return rac.client.redis.HIncrBy(context.Background(), "reddit:requests", rac.redditId, 1).Err()
+	return nil
+	// return rac.client.redis.HIncrBy(context.Background(), "reddit:requests", rac.redditId, 1).Err()
 }
 
 func (rac *AuthenticatedClient) isRateLimited() bool {
+	return false
+
+	/*
 	if rac.redditId == SkipRateLimiting {
 		return false
 	}
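The second file stubs out the client's per-request Redis bookkeeping: logRequest no longer issues an HINCRBY per outbound Reddit API call, and isRateLimited short-circuits to false instead of doing a GET per call, with the old bodies left commented out. For reference, the sketch below shows roughly what those two operations did with go-redis v8; the key names come from the diff, while the client wiring (rdb, redditID) and the standalone functions are assumptions:

// Sketch only: the per-request Redis work this commit disables.
// rdb and redditID are stand-ins for the client's fields.
package reddit

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// What logRequest did before: one HINCRBY per outbound API call,
// keyed by the account's Reddit ID in the "reddit:requests" hash.
func countRequest(ctx context.Context, rdb *redis.Client, redditID string) error {
	return rdb.HIncrBy(ctx, "reddit:requests", redditID, 1).Err()
}

// What isRateLimited did before: one GET per call; a missing key
// (redis.Nil) means the account is not currently rate limited.
func rateLimited(ctx context.Context, rdb *redis.Client, redditID string) bool {
	key := fmt.Sprintf("reddit:%s:ratelimited", redditID)
	_, err := rdb.Get(ctx, key).Result()
	return err != redis.Nil
}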
@@ -367,9 +371,13 @@ func (rac *AuthenticatedClient) isRateLimited() bool {
 	key := fmt.Sprintf("reddit:%s:ratelimited", rac.redditId)
 	_, err := rac.client.redis.Get(context.Background(), key).Result()
 	return err != redis.Nil
+	*/
 }
 
 func (rac *AuthenticatedClient) markRateLimited(rli *RateLimitingInfo) error {
+	return nil
+
+	/*
 	if rac.redditId == SkipRateLimiting {
 		return ErrRequiresRedditId
 	}
@@ -397,6 +405,7 @@ func (rac *AuthenticatedClient) markRateLimited(rli *RateLimitingInfo) error {
 
 	_, err := rac.client.redis.SetEX(context.Background(), key, info, duration).Result()
 	return err
+	*/
 }
 
 func (rac *AuthenticatedClient) RefreshTokens(ctx context.Context, opts ...RequestOption) (*RefreshTokenResponse, error) {
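markRateLimited is likewise reduced to return nil, with its original body commented out: it used to write a per-account marker via SETEX, giving the flag a TTL so that isRateLimited's GET stops matching once the window has passed. A sketch of that now-skipped write; the value and TTL are simplified here because the construction of info and duration falls outside the captured hunks, and the standalone function is an illustration rather than the client's actual method:

// Sketch only: the rate-limit marker write that markRateLimited now skips.
// The value and TTL are stand-ins for the RateLimitingInfo fields.
package reddit

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func markRateLimited(ctx context.Context, rdb *redis.Client, redditID, info string, window time.Duration) error {
	key := fmt.Sprintf("reddit:%s:ratelimited", redditID)
	// SETEX stores the marker with an expiry, so the flag clears itself
	// once the rate-limit window is over.
	_, err := rdb.SetEX(ctx, key, info, window).Result()
	return err
}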