migrate out of logrus (#76)

André Medeiros 2022-05-23 14:17:25 -04:00 committed by GitHub
parent ccba530255
commit b1f266bf91
17 changed files with 522 additions and 596 deletions
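The diff below is mechanical: every structured logrus call is rewritten as a zap call with typed fields, and *logrus.Logger parameters become *zap.Logger. A minimal sketch of the pattern being applied (illustrative only, not a line taken from this commit):

    // Before: logrus builds an untyped field map, then logs.
    logger.WithFields(logrus.Fields{"account#id": id, "err": err}).Error("failed to fetch account")

    // After: zap takes the message first, followed by strongly typed fields.
    logger.Error("failed to fetch account", zap.Int64("account#id", id), zap.Error(err))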

go.mod

@@ -17,11 +17,11 @@ require (
     github.com/jackc/pgx/v4 v4.16.0
     github.com/joho/godotenv v1.4.0
     github.com/sideshow/apns2 v0.23.0
-    github.com/sirupsen/logrus v1.8.1
     github.com/smtp2go-oss/smtp2go-go v1.0.1
     github.com/spf13/cobra v1.4.0
     github.com/stretchr/testify v1.7.1
     github.com/valyala/fastjson v1.6.3
+    go.uber.org/zap v1.13.0
 )

 require (
@@ -45,10 +45,14 @@ require (
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/robfig/cron/v3 v3.0.1 // indirect
     github.com/spf13/pflag v1.0.5 // indirect
+    go.uber.org/atomic v1.6.0 // indirect
+    go.uber.org/multierr v1.5.0 // indirect
     golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
+    golang.org/x/lint v0.0.0-20190930215403-16217165b5de // indirect
     golang.org/x/net v0.0.0-20220403103023-749bd193bc2b // indirect
     golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
     golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
     golang.org/x/text v0.3.7 // indirect
+    golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e // indirect
     gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
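The go.sum churn that follows is a consequence of this swap: go.uber.org/zap v1.13.0 pulls in go.uber.org/atomic, go.uber.org/multierr and a few golang.org/x tooling modules as indirect dependencies, while the logrus checksums drop out. Assuming a standard Go toolchain, the equivalent local commands would be (illustrative, not taken from the commit):

    go get go.uber.org/zap@v1.13.0
    go mod tidy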

go.sum

@@ -2,6 +2,7 @@
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q=
 github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@@ -277,8 +278,6 @@
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smtp2go-oss/smtp2go-go v1.0.1 h1:rwcoNLjOyigOzCjKp/guylKY/xJpoeypSxgtcC/g6DA=
@@ -345,13 +344,17 @@
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 golang.org/x/crypto v0.0.0-20170512130425-ab89591268e0/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -372,9 +375,11 @@
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -475,12 +480,14 @@
 golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gonum.org/v1/gonum v0.0.0-20190502212712-4a2eb0188cbc/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
 gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
@@ -556,5 +563,6 @@
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo=


@@ -8,7 +8,7 @@ import (
     "time"
     "github.com/gorilla/mux"
-    "github.com/sirupsen/logrus"
+    "go.uber.org/zap"
     "github.com/christianselig/apollo-backend/internal/domain"
     "github.com/christianselig/apollo-backend/internal/reddit"
@@ -142,8 +142,8 @@ func (a *api) upsertAccountsHandler(w http.ResponseWriter, r *http.Request) {
     for _, acc := range raccs {
         delete(accsMap, acc.NormalizedUsername())
-        ac := a.reddit.NewAuthenticatedClient(reddit.SkipRateLimiting, acc.RefreshToken, acc.AccessToken)
-        tokens, err := ac.RefreshTokens(ctx)
+        rac := a.reddit.NewAuthenticatedClient(reddit.SkipRateLimiting, acc.RefreshToken, acc.AccessToken)
+        tokens, err := rac.RefreshTokens(ctx)
         if err != nil {
             a.errorResponse(w, r, 422, err)
             return
@@ -154,8 +154,8 @@ func (a *api) upsertAccountsHandler(w http.ResponseWriter, r *http.Request) {
         acc.RefreshToken = tokens.RefreshToken
         acc.AccessToken = tokens.AccessToken
-        ac = a.reddit.NewAuthenticatedClient(reddit.SkipRateLimiting, acc.RefreshToken, acc.AccessToken)
-        me, err := ac.Me(ctx)
+        rac = a.reddit.NewAuthenticatedClient(reddit.SkipRateLimiting, tokens.RefreshToken, tokens.AccessToken)
+        me, err := rac.Me(ctx)
         if err != nil {
             a.errorResponse(w, r, 422, err)
@@ -183,29 +183,26 @@ func (a *api) upsertAccountsHandler(w http.ResponseWriter, r *http.Request) {
     }
     for _, acc := range accsMap {
-        fmt.Println(acc.NormalizedUsername())
         _ = a.accountRepo.Disassociate(ctx, &acc, &dev)
     }
     body := fmt.Sprintf(`{"apns_token": "%s"}`, apns)
     req, err := http.NewRequestWithContext(ctx, "POST", "https://apollopushserver.xyz/api/new-server-addition", strings.NewReader(body))
-    req.Header.Set("Authorization", "Bearer 98g5j89aurqwfcsp9khlnvgd38fa15")
     if err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "apns": apns,
-        }).Error(err)
+        a.logger.Error("could not setup request to disassociate from legacy api", zap.Error(err), zap.String("apns", apns))
+        w.WriteHeader(http.StatusOK)
         return
     }
-    w.WriteHeader(http.StatusOK)
+    req.Header.Set("Authorization", "Bearer 98g5j89aurqwfcsp9khlnvgd38fa15")
     resp, _ := a.httpClient.Do(req)
     if err != nil {
-        a.logger.WithFields(logrus.Fields{"err": err}).Error("failed to remove old client")
+        a.logger.Error("failed to remove from old notification server", zap.Error(err), zap.String("apns", apns))
         return
     }
     resp.Body.Close()
+    w.WriteHeader(http.StatusOK)
 }

 func (a *api) upsertAccountHandler(w http.ResponseWriter, r *http.Request) {
@@ -216,9 +213,7 @@ func (a *api) upsertAccountHandler(w http.ResponseWriter, r *http.Request) {
     var acct domain.Account
     if err := json.NewDecoder(r.Body).Decode(&acct); err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed to parse request json")
+        a.logger.Error("failed to parse request json", zap.Error(err))
         a.errorResponse(w, r, 422, err)
         return
     }
@@ -227,9 +222,7 @@ func (a *api) upsertAccountHandler(w http.ResponseWriter, r *http.Request) {
     ac := a.reddit.NewAuthenticatedClient(reddit.SkipRateLimiting, acct.RefreshToken, acct.AccessToken)
     tokens, err := ac.RefreshTokens(ctx)
     if err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed to refresh token")
+        a.logger.Error("failed to refresh token", zap.Error(err))
         a.errorResponse(w, r, 422, err)
         return
     }
@@ -243,16 +236,14 @@ func (a *api) upsertAccountHandler(w http.ResponseWriter, r *http.Request) {
     me, err := ac.Me(ctx)
     if err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed to grab user details from Reddit")
+        a.logger.Error("failed to grab user details from reddit", zap.Error(err))
         a.errorResponse(w, r, 500, err)
         return
     }
     if me.NormalizedUsername() != acct.NormalizedUsername() {
         err := fmt.Errorf("wrong user: expected %s, got %s", me.NormalizedUsername(), acct.NormalizedUsername())
-        a.logger.WithFields(logrus.Fields{"err": err}).Warn("user is not who they say they are")
+        a.logger.Warn("user is not who they say they are", zap.Error(err))
         a.errorResponse(w, r, 401, err)
         return
     }
@@ -263,26 +254,20 @@ func (a *api) upsertAccountHandler(w http.ResponseWriter, r *http.Request) {
     // Associate
     dev, err := a.deviceRepo.GetByAPNSToken(ctx, vars["apns"])
     if err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed fetching device from database")
+        a.logger.Error("failed to fetch device from database", zap.Error(err))
         a.errorResponse(w, r, 500, err)
         return
     }
     // Upsert account
     if err := a.accountRepo.CreateOrUpdate(ctx, &acct); err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed updating account in database")
+        a.logger.Error("failed to update account", zap.Error(err))
         a.errorResponse(w, r, 500, err)
         return
     }
     if err := a.accountRepo.Associate(ctx, &acct, &dev); err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed associating account with device")
+        a.logger.Error("failed to associate account with device", zap.Error(err))
         a.errorResponse(w, r, 500, err)
         return
     }


@@ -14,7 +14,7 @@ import (
     "github.com/gorilla/mux"
     "github.com/jackc/pgx/v4/pgxpool"
     "github.com/sideshow/apns2/token"
-    "github.com/sirupsen/logrus"
+    "go.uber.org/zap"
     "github.com/christianselig/apollo-backend/internal/domain"
     "github.com/christianselig/apollo-backend/internal/reddit"
@@ -22,7 +22,7 @@ import (
 )

 type api struct {
-    logger *logrus.Logger
+    logger *zap.Logger
     statsd *statsd.Client
     reddit *reddit.Client
     apns   *token.Token
@@ -35,7 +35,7 @@ type api struct {
     userRepo domain.UserRepository
 }

-func NewAPI(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, redis *redis.Client, pool *pgxpool.Pool) *api {
+func NewAPI(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, redis *redis.Client, pool *pgxpool.Pool) *api {
     reddit := reddit.NewClient(
         os.Getenv("REDDIT_CLIENT_ID"),
         os.Getenv("REDDIT_CLIENT_SECRET"),
@@ -178,20 +178,20 @@ func (a *api) loggingMiddleware(next http.Handler) http.Handler {
             }
         }
-        logEntry := a.logger.WithFields(logrus.Fields{
-            "duration":       time.Since(start).Milliseconds(),
-            "method":         r.Method,
-            "remote#addr":    remoteAddr,
-            "response#bytes": lrw.bytes,
-            "status":         lrw.statusCode,
-            "uri":            r.RequestURI,
-        })
+        fields := []zap.Field{
+            zap.Int64("duration", time.Since(start).Milliseconds()),
+            zap.String("method", r.Method),
+            zap.String("remote#addr", remoteAddr),
+            zap.Int("response#bytes", lrw.bytes),
+            zap.Int("status", lrw.statusCode),
+            zap.String("uri", r.RequestURI),
+        }
         if lrw.statusCode == 200 {
-            logEntry.Info()
+            a.logger.Info("", fields...)
         } else {
             err := lrw.Header().Get("X-Apollo-Error")
-            logEntry.Error(err)
+            a.logger.Error(err, fields...)
         }
     })
 }
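A possible alternative to collecting a []zap.Field slice and passing it to each call is zap's Logger.With, which returns a child logger with the fields pre-attached. A hedged sketch, not part of this commit:

    requestLogger := a.logger.With(
        zap.String("method", r.Method),
        zap.Int("status", lrw.statusCode),
        zap.String("uri", r.RequestURI),
    )
    requestLogger.Info("handled request") // message text is illustrative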


@@ -11,7 +11,7 @@ import (
     "github.com/gorilla/mux"
     "github.com/sideshow/apns2"
     "github.com/sideshow/apns2/payload"
-    "github.com/sirupsen/logrus"
+    "go.uber.org/zap"
     "github.com/christianselig/apollo-backend/internal/domain"
 )
@@ -46,9 +46,7 @@ func (a *api) testDeviceHandler(w http.ResponseWriter, r *http.Request) {
     d, err := a.deviceRepo.GetByAPNSToken(ctx, tok)
     if err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed fetching device from database")
+        a.logger.Error("failed fetching device from database", zap.Error(err))
         a.errorResponse(w, r, 500, err)
         return
     }
@@ -83,9 +81,7 @@ func (a *api) testDeviceHandler(w http.ResponseWriter, r *http.Request) {
     }
     if _, err := client.Push(notification); err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed to send test notification")
+        a.logger.Info("failed to send test notification", zap.Error(err))
         a.errorResponse(w, r, 500, err)
         return
     }


@@ -7,7 +7,7 @@ import (
     "github.com/gorilla/mux"
     "github.com/sideshow/apns2"
     "github.com/sideshow/apns2/payload"
-    "github.com/sirupsen/logrus"
+    "go.uber.org/zap"
 )

 const (
@@ -31,9 +31,7 @@
         d, err := a.deviceRepo.GetByAPNSToken(ctx, tok)
         if err != nil {
-            a.logger.WithFields(logrus.Fields{
-                "err": err,
-            }).Info("failed fetching device from database")
+            a.logger.Info("failed fetching device from database", zap.Error(err))
             a.errorResponse(w, r, 500, err)
             return
         }
@@ -56,9 +54,7 @@
         }
         if _, err := client.Push(notification); err != nil {
-            a.logger.WithFields(logrus.Fields{
-                "err": err,
-            }).Info("failed to send test notification")
+            a.logger.Info("failed to send test notification", zap.Error(err))
             a.errorResponse(w, r, 500, err)
             return
         }


@@ -7,7 +7,7 @@ import (
     "time"
     "github.com/gorilla/mux"
-    "github.com/sirupsen/logrus"
+    "go.uber.org/zap"
     "github.com/christianselig/apollo-backend/internal/domain"
     "github.com/christianselig/apollo-backend/internal/itunes"
@@ -23,9 +23,7 @@ func (a *api) checkReceiptHandler(w http.ResponseWriter, r *http.Request) {
     iapr, err := itunes.NewIAPResponse(string(body), true)
     if err != nil {
-        a.logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Info("failed verifying receipt")
+        a.logger.Info("failed to verify receipt", zap.Error(err))
         a.errorResponse(w, r, 500, err)
         return
     }


@@ -7,8 +7,8 @@ import (
     "github.com/christianselig/apollo-backend/internal/api"
     "github.com/christianselig/apollo-backend/internal/cmdutil"
-    "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
+    "go.uber.org/zap"
 )

 func APICmd(ctx context.Context) *cobra.Command {
@@ -24,7 +24,8 @@ func APICmd(ctx context.Context) *cobra.Command {
                 port, _ = strconv.Atoi(os.Getenv("PORT"))
             }

-            logger := cmdutil.NewLogrusLogger(false)
+            logger := cmdutil.NewLogger(false)
+            defer func() { _ = logger.Sync() }()

             statsd, err := cmdutil.NewStatsdClient()
             if err != nil {
@@ -49,9 +50,7 @@ func APICmd(ctx context.Context) *cobra.Command {
             go func() { _ = srv.ListenAndServe() }()

-            logger.WithFields(logrus.Fields{
-                "port": port,
-            }).Info("started api")
+            logger.Info("started api", zap.Int("port", port))

             <-ctx.Done()


@@ -13,8 +13,8 @@ import (
     "github.com/go-redis/redis/v8"
     "github.com/jackc/pgx/v4"
    "github.com/jackc/pgx/v4/pgxpool"
-    "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
+    "go.uber.org/zap"

     "github.com/christianselig/apollo-backend/internal/cmdutil"
     "github.com/christianselig/apollo-backend/internal/domain"
@@ -29,7 +29,8 @@ func SchedulerCmd(ctx context.Context) *cobra.Command {
         Args:  cobra.ExactArgs(0),
         Short: "Schedules jobs and runs several maintenance tasks periodically.",
         RunE: func(cmd *cobra.Command, args []string) error {
-            logger := cmdutil.NewLogrusLogger(false)
+            logger := cmdutil.NewLogger(false)
+            defer func() { _ = logger.Sync() }()

             statsd, err := cmdutil.NewStatsdClient()
             if err != nil {
@@ -126,71 +127,56 @@ func evalScript(ctx context.Context, redis *redis.Client) (string, error) {
     return redis.ScriptLoad(ctx, lua).Result()
 }

-func pruneAccounts(ctx context.Context, logger *logrus.Logger, pool *pgxpool.Pool) {
+func pruneAccounts(ctx context.Context, logger *zap.Logger, pool *pgxpool.Pool) {
     expiry := time.Now().Add(-domain.StaleTokenThreshold)
     ar := repository.NewPostgresAccount(pool)

     stale, err := ar.PruneStale(ctx, expiry)
     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed cleaning stale accounts")
+        logger.Error("failed to clean stale accounts", zap.Error(err))
         return
     }

     orphaned, err := ar.PruneOrphaned(ctx)
     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed cleaning orphaned accounts")
+        logger.Error("failed to clean orphaned accounts", zap.Error(err))
         return
     }

     if count := stale + orphaned; count > 0 {
-        logger.WithFields(logrus.Fields{
-            "stale":    stale,
-            "orphaned": orphaned,
-        }).Info("pruned accounts")
+        logger.Info("pruned accounts", zap.Int64("stale", stale), zap.Int64("orphaned", orphaned))
     }
 }

-func pruneDevices(ctx context.Context, logger *logrus.Logger, pool *pgxpool.Pool) {
+func pruneDevices(ctx context.Context, logger *zap.Logger, pool *pgxpool.Pool) {
     now := time.Now()
     dr := repository.NewPostgresDevice(pool)

     count, err := dr.PruneStale(ctx, now)
     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed cleaning stale devices")
+        logger.Error("failed to clean stale devices", zap.Error(err))
         return
     }

     if count > 0 {
-        logger.WithFields(logrus.Fields{
-            "count": count,
-        }).Info("pruned devices")
+        logger.Info("pruned devices", zap.Int64("count", count))
     }
 }

-func cleanQueues(logger *logrus.Logger, jobsConn rmq.Connection) {
+func cleanQueues(logger *zap.Logger, jobsConn rmq.Connection) {
     cleaner := rmq.NewCleaner(jobsConn)
     count, err := cleaner.Clean()
     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed cleaning jobs from queues")
+        logger.Error("failed to clean jobs from queues", zap.Error(err))
         return
     }

     if count > 0 {
-        logger.WithFields(logrus.Fields{
-            "count": count,
-        }).Info("returned jobs to queues")
+        logger.Info("returned jobs to queues", zap.Int64("count", count))
     }
 }

-func reportStats(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, pool *pgxpool.Pool) {
+func reportStats(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool) {
     var (
         count int64
@@ -209,14 +195,11 @@
         _ = pool.QueryRow(ctx, metric.query).Scan(&count)
         _ = statsd.Gauge(metric.name, float64(count), []string{}, 1)
-        logger.WithFields(logrus.Fields{
-            "count":  count,
-            "metric": metric.name,
-        }).Debug("fetched metrics")
+        logger.Debug("fetched metrics", zap.String("metric", metric.name), zap.Int64("count", count))
     }
 }

-func enqueueUsers(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queue rmq.Queue) {
+func enqueueUsers(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queue rmq.Queue) {
     now := time.Now()
     next := now.Add(domain.NotificationCheckInterval)
@@ -255,9 +238,7 @@
     })

     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed to fetch batch of users")
+        logger.Error("failed to fetch batch of users", zap.Error(err))
         return
     }
@@ -265,10 +246,7 @@
         return
     }

-    logger.WithFields(logrus.Fields{
-        "count": len(ids),
-        "start": now,
-    }).Debug("enqueueing user batch")
+    logger.Debug("enqueueing user batch", zap.Int("count", len(ids)), zap.Time("start", now))

     batchIds := make([]string, len(ids))
     for i, id := range ids {
@@ -276,13 +254,11 @@
     }

     if err = queue.Publish(batchIds...); err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed to enqueue user")
+        logger.Error("failed to enqueue user batch", zap.Error(err))
     }
 }

-func enqueueSubreddits(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queues []rmq.Queue) {
+func enqueueSubreddits(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queues []rmq.Queue) {
     now := time.Now()
     next := now.Add(domain.SubredditCheckInterval)
@@ -321,9 +297,7 @@
     })

     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed to fetch batch of subreddits")
+        logger.Error("failed to fetch batch of subreddits", zap.Error(err))
         return
     }
@@ -331,10 +305,7 @@
         return
     }

-    logger.WithFields(logrus.Fields{
-        "count": len(ids),
-        "start": now,
-    }).Debug("enqueueing subreddit batch")
+    logger.Debug("enqueueing subreddit batch", zap.Int("count", len(ids)), zap.Time("start", now))

     batchIds := make([]string, len(ids))
     for i, id := range ids {
@@ -343,16 +314,13 @@
     for _, queue := range queues {
         if err = queue.Publish(batchIds...); err != nil {
-            logger.WithFields(logrus.Fields{
-                "queue": queue,
-                "err":   err,
-            }).Error("failed to enqueue subreddit")
+            logger.Error("failed to enqueue subreddit batch", zap.Error(err))
         }
     }
 }

-func enqueueStuckAccounts(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queue rmq.Queue) {
+func enqueueStuckAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, queue rmq.Queue) {
     now := time.Now()
     next := now.Add(domain.StuckNotificationCheckInterval)
@@ -391,9 +359,7 @@
     })

     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed to fetch possible stuck accounts")
+        logger.Error("failed to fetch accounts", zap.Error(err))
         return
     }
@@ -401,10 +367,7 @@
         return
     }

-    logger.WithFields(logrus.Fields{
-        "count": len(ids),
-        "start": now,
-    }).Debug("enqueueing stuck account batch")
+    logger.Debug("enqueueing stuck account batch", zap.Int("count", len(ids)), zap.Time("start", now))

     batchIds := make([]string, len(ids))
     for i, id := range ids {
@@ -412,14 +375,11 @@
     }

     if err = queue.Publish(batchIds...); err != nil {
-        logger.WithFields(logrus.Fields{
-            "queue": queue,
-            "err":   err,
-        }).Error("failed to enqueue stuck accounts")
+        logger.Error("failed to enqueue stuck account batch", zap.Error(err))
     }
 }

-func enqueueAccounts(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, pool *pgxpool.Pool, redisConn *redis.Client, luaSha string, queue rmq.Queue) {
+func enqueueAccounts(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, pool *pgxpool.Pool, redisConn *redis.Client, luaSha string, queue rmq.Queue) {
     now := time.Now()
     next := now.Add(domain.NotificationCheckInterval)
@@ -461,9 +421,7 @@
     })

     if err != nil {
-        logger.WithFields(logrus.Fields{
-            "err": err,
-        }).Error("failed to fetch batch of accounts")
+        logger.Error("failed to fetch batch of accounts", zap.Error(err))
         return
     }
@@ -471,10 +429,8 @@
         return
     }

-    logger.WithFields(logrus.Fields{
-        "count": len(ids),
-        "start": now,
-    }).Debug("enqueueing account batch")
+    logger.Debug("enqueueing account batch", zap.Int("count", len(ids)), zap.Time("start", now))

     // Split ids in batches
     for i := 0; i < len(ids); i += batchSize {
         j := i + batchSize
@@ -483,16 +439,11 @@
         }
         batch := Int64Slice(ids[i:j])

-        logger.WithFields(logrus.Fields{
-            "len": len(batch),
-        }).Debug("enqueueing batch")
+        logger.Debug("enqueueing batch", zap.Int("len", len(batch)))

         res, err := redisConn.EvalSha(ctx, luaSha, []string{"locks:accounts"}, batch).Result()
         if err != nil {
-            logger.WithFields(logrus.Fields{
-                "err": err,
-            }).Error("failed to check for locked accounts")
+            logger.Error("failed to check for locked accounts", zap.Error(err))
         }

         vals := res.([]interface{})
@@ -509,17 +460,11 @@
         }

         if err = queue.Publish(batchIds...); err != nil {
-            logger.WithFields(logrus.Fields{
-                "err": err,
-            }).Error("failed to enqueue account")
+            logger.Error("failed to enqueue account batch", zap.Error(err))
         }
     }

-    logger.WithFields(logrus.Fields{
-        "count":   enqueued,
-        "skipped": skipped,
-        "start":   now,
-    }).Debug("done enqueueing account batch")
+    logger.Debug("done enqueueing account batch", zap.Int("count", enqueued), zap.Int("skipped", skipped), zap.Time("start", now))
 }

 type Int64Slice []int64


@@ -34,7 +34,8 @@ func WorkerCmd(ctx context.Context) *cobra.Command {
                 return fmt.Errorf("need a queue to work on")
             }

-            logger := cmdutil.NewLogrusLogger(false)
+            logger := cmdutil.NewLogger(false)
+            defer func() { _ = logger.Sync() }()

             tag := fmt.Sprintf("worker:%s", queueID)
             statsd, err := cmdutil.NewStatsdClient(tag)


@@ -9,19 +9,13 @@ import (
     "github.com/adjust/rmq/v4"
     "github.com/go-redis/redis/v8"
     "github.com/jackc/pgx/v4/pgxpool"
-    "github.com/sirupsen/logrus"
+    "go.uber.org/zap"
 )

-func NewLogrusLogger(debug bool) *logrus.Logger {
-    logger := logrus.New()
-    if debug || os.Getenv("ENV") == "" {
-        logger.SetLevel(logrus.DebugLevel)
-    } else {
-        logger.SetFormatter(&logrus.TextFormatter{
-            DisableColors: true,
-            FullTimestamp: true,
-        })
+func NewLogger(debug bool) *zap.Logger {
+    logger, _ := zap.NewProduction()
+    if debug || os.Getenv("ENV") != "production" {
+        logger, _ = zap.NewDevelopment()
     }

     return logger
@@ -73,13 +67,11 @@ func NewDatabasePool(ctx context.Context, maxConns int) (*pgxpool.Pool, error) {
     return pgxpool.ConnectConfig(ctx, config)
 }

-func NewQueueClient(logger *logrus.Logger, conn *redis.Client, identifier string) (rmq.Connection, error) {
+func NewQueueClient(logger *zap.Logger, conn *redis.Client, identifier string) (rmq.Connection, error) {
     errChan := make(chan error, 10)
     go func() {
         for err := range errChan {
-            logger.WithFields(logrus.Fields{
-                "err": err,
-            }).Error("error occured with queue")
+            logger.Error("error occurred within queue", zap.Error(err))
         }
     }()
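For context on NewLogger above: zap.NewProduction returns a JSON logger at Info level, while zap.NewDevelopment returns a human-readable console logger at Debug level; the command entry points in this commit also add defer func() { _ = logger.Sync() }() so buffered entries are flushed on shutdown. A minimal usage sketch (the port value is illustrative):

    logger := cmdutil.NewLogger(false)
    defer func() { _ = logger.Sync() }()
    logger.Info("started api", zap.Int("port", 4000))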


@@ -14,7 +14,7 @@ import (
     "github.com/sideshow/apns2"
     "github.com/sideshow/apns2/payload"
     "github.com/sideshow/apns2/token"
-    "github.com/sirupsen/logrus"
+    "go.uber.org/zap"
     "github.com/christianselig/apollo-backend/internal/domain"
     "github.com/christianselig/apollo-backend/internal/reddit"
@@ -33,7 +33,7 @@ const (
 type notificationsWorker struct {
     context.Context
-    logger *logrus.Logger
+    logger *zap.Logger
     statsd *statsd.Client
     db     *pgxpool.Pool
     redis  *redis.Client
@@ -47,7 +47,7 @@ type notificationsWorker struct {
     deviceRepo domain.DeviceRepository
 }

-func NewNotificationsWorker(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker {
+func NewNotificationsWorker(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker {
     reddit := reddit.NewClient(
         os.Getenv("REDDIT_CLIENT_ID"),
         os.Getenv("REDDIT_CLIENT_SECRET"),
@@ -92,9 +92,7 @@ func (nw *notificationsWorker) Start() error {
         return err
     }

-    nw.logger.WithFields(logrus.Fields{
-        "numConsumers": nw.consumers,
-    }).Info("starting up notifications worker")
+    nw.logger.Info("starting up notifications worker", zap.Int("consumers", nw.consumers))

     prefetchLimit := int64(nw.consumers * 2)
@@ -139,40 +137,28 @@
 func (nc *notificationsConsumer) Consume(delivery rmq.Delivery) {
     defer func() {
-        lockKey := fmt.Sprintf("locks:accounts:%s", delivery.Payload())
-        if err := nc.redis.Del(nc, lockKey).Err(); err != nil {
-            nc.logger.WithFields(logrus.Fields{
-                "lockKey": lockKey,
-                "err":     err,
-            }).Error("failed to remove lock")
+        key := fmt.Sprintf("locks:accounts:%s", delivery.Payload())
+        if err := nc.redis.Del(nc, key).Err(); err != nil {
+            nc.logger.Error("failed to remove account lock", zap.Error(err), zap.String("key", key))
         }
     }()

-    nc.logger.WithFields(logrus.Fields{
-        "account#id": delivery.Payload(),
-    }).Debug("starting job")
     id, err := strconv.ParseInt(delivery.Payload(), 10, 64)
     if err != nil {
-        nc.logger.WithFields(logrus.Fields{
-            "account#id": delivery.Payload(),
-            "err":        err,
-        }).Error("failed to parse account ID")
+        nc.logger.Error("failed to parse account id from payload", zap.Error(err), zap.String("payload", delivery.Payload()))
         _ = delivery.Reject()
         return
     }

+    nc.logger.Debug("starting job", zap.Int64("account#id", id))
     defer func() { _ = delivery.Ack() }()

     now := time.Now()
     account, err := nc.accountRepo.GetByID(nc, id)
     if err != nil {
-        nc.logger.WithFields(logrus.Fields{
-            "account#id": id,
-            "err":        err,
-        }).Error("failed to fetch account from database")
+        nc.logger.Error("failed to fetch account from database", zap.Error(err), zap.Int64("account#id", id))
         return
     }
@@ -184,35 +170,39 @@ func (nc *notificationsConsumer) Consume(delivery rmq.Delivery) {
     defer func(acc *domain.Account) {
         if err = nc.accountRepo.Update(nc, acc); err != nil {
-            nc.logger.WithFields(logrus.Fields{
-                "account#username": account.NormalizedUsername(),
-                "err":              err,
-            }).Error("failed to update account")
+            nc.logger.Error("failed to update account",
+                zap.Error(err),
+                zap.Int64("account#id", id),
+                zap.String("account#username", account.NormalizedUsername()),
+            )
         }
     }(&account)

     rac := nc.reddit.NewAuthenticatedClient(account.AccountID, account.RefreshToken, account.AccessToken)
     if account.TokenExpiresAt.Before(now) {
-        nc.logger.WithFields(logrus.Fields{
-            "account#username": account.NormalizedUsername(),
-        }).Debug("refreshing reddit token")
+        nc.logger.Debug("refreshing reddit token",
+            zap.Int64("account#id", id),
+            zap.String("account#username", account.NormalizedUsername()),
+        )

         tokens, err := rac.RefreshTokens(nc)
         if err != nil {
             if err != reddit.ErrOauthRevoked {
-                nc.logger.WithFields(logrus.Fields{
-                    "account#username": account.NormalizedUsername(),
-                    "err":              err,
-                }).Error("failed to refresh reddit tokens")
+                nc.logger.Error("failed to refresh reddit tokens",
+                    zap.Error(err),
+                    zap.Int64("account#id", id),
+                    zap.String("account#username", account.NormalizedUsername()),
+                )
                 return
             }

             err = nc.deleteAccount(account)
             if err != nil {
-                nc.logger.WithFields(logrus.Fields{
-                    "account#username": account.NormalizedUsername(),
-                    "err":              err,
-                }).Error("failed to remove revoked account")
+                nc.logger.Error("failed to remove revoked account",
+                    zap.Error(err),
+                    zap.Int64("account#id", id),
+                    zap.String("account#username", account.NormalizedUsername()),
+                )
             }

             return
@@ -234,9 +224,7 @@ func (nc *notificationsConsumer) Consume(delivery rmq.Delivery) {
         _ = nc.statsd.Histogram("apollo.queue.delay", float64(latency.Milliseconds()), []string{}, rate)
     }

-    nc.logger.WithFields(logrus.Fields{
-        "account#username": account.NormalizedUsername(),
-    }).Debug("fetching message inbox")
+    nc.logger.Debug("fetching message inbox", zap.Int64("account#id", id), zap.String("account#username", account.NormalizedUsername()))

     opts := []reddit.RequestOption{reddit.WithQuery("limit", "10")}
     if account.LastMessageID != "" {
@@ -249,38 +237,42 @@ func (nc *notificationsConsumer) Consume(delivery rmq.Delivery) {
         case reddit.ErrTimeout, reddit.ErrRateLimited: // Don't log timeouts or rate limits
             break
         case reddit.ErrOauthRevoked:
-            err = nc.deleteAccount(account)
-            if err != nil {
-                nc.logger.WithFields(logrus.Fields{
-                    "account#username": account.NormalizedUsername(),
-                    "err":              err,
-                }).Error("failed to remove revoked account")
-                return
+            if err = nc.deleteAccount(account); err != nil {
+                nc.logger.Error("failed to remove revoked account",
+                    zap.Error(err),
+                    zap.Int64("account#id", id),
+                    zap.String("account#username", account.NormalizedUsername()),
+                )
+            } else {
+                nc.logger.Info("removed revoked account",
+                    zap.Int64("account#id", id),
+                    zap.String("account#username", account.NormalizedUsername()),
+                )
             }
-            nc.logger.WithFields(logrus.Fields{
-                "account#username": account.NormalizedUsername(),
-            }).Info("removed revoked account")
         default:
-            nc.logger.WithFields(logrus.Fields{
-                "account#username": account.NormalizedUsername(),
-                "err":              err,
-            }).Error("failed to fetch message inbox")
+            nc.logger.Error("failed to fetch message inbox",
+                zap.Error(err),
+                zap.Int64("account#id", id),
+                zap.String("account#username", account.NormalizedUsername()),
+            )
         }

         return
     }

     // Figure out where we stand
     if msgs.Count == 0 {
-        nc.logger.WithFields(logrus.Fields{
-            "account#username": account.NormalizedUsername(),
-        }).Debug("no new messages, bailing early")
+        nc.logger.Debug("no new messages, bailing early",
+            zap.Int64("account#id", id),
+            zap.String("account#username", account.NormalizedUsername()),
+        )
         return
     }

-    nc.logger.WithFields(logrus.Fields{
-        "account#username": account.NormalizedUsername(),
-        "count":            msgs.Count,
-    }).Debug("fetched messages")
+    nc.logger.Debug("fetched messages",
+        zap.Int64("account#id", id),
+        zap.String("account#username", account.NormalizedUsername()),
+        zap.Int("count", msgs.Count),
+    )

     for _, msg := range msgs.Children {
         if !msg.IsDeleted() {
@@ -291,25 +283,28 @@ func (nc *notificationsConsumer) Consume(delivery rmq.Delivery) {
     // Let's populate this with the latest message so we don't flood users with stuff
     if newAccount {
-        nc.logger.WithFields(logrus.Fields{
-            "account#username": account.NormalizedUsername(),
-        }).Debug("populating first message ID to prevent spamming")
+        nc.logger.Debug("populating first message id to prevent spamming",
+            zap.Int64("account#id", id),
+            zap.String("account#username", account.NormalizedUsername()),
+        )
         return
     }

     devices, err := nc.deviceRepo.GetInboxNotifiableByAccountID(nc, account.ID)
     if err != nil {
-        nc.logger.WithFields(logrus.Fields{
-            "account#username": account.NormalizedUsername(),
-            "err":              err,
-        }).Error("failed to fetch account devices")
+        nc.logger.Error("failed to fetch account devices",
+            zap.Error(err),
+            zap.Int64("account#id", id),
+            zap.String("account#username", account.NormalizedUsername()),
+        )
        return
     }

     if len(devices) == 0 {
-        nc.logger.WithFields(logrus.Fields{
-            "account#username": account.NormalizedUsername(),
-        }).Debug("no notifiable devices, finishing job")
+        nc.logger.Debug("no notifiable devices, bailing early",
+            zap.Int64("account#id", id),
+            zap.String("account#username", account.NormalizedUsername()),
+        )
         return
     }
@@ -335,21 +330,24 @@ func (nc *notificationsConsumer) Consume(delivery rmq.Delivery) {
             res, err := client.Push(notification)
             if err != nil || !res.Sent() {
                 _ = nc.statsd.Incr("apns.notification.errors", []string{}, 1)
-                nc.logger.WithFields(logrus.Fields{
-                    "account#username": account.NormalizedUsername(),
-                    "err":              err,
-                    "status":           res.StatusCode,
-                    "reason":           res.Reason,
-                }).Error("failed to send notification")
+                nc.logger.Error("failed to send notification",
+                    zap.Error(err),
+                    zap.Int64("account#id", id),
+                    zap.String("account#username", account.NormalizedUsername()),
+                    zap.String("device#token", device.APNSToken),
+                    zap.Int("response#status", res.StatusCode),
+                    zap.String("response#reason", res.Reason),
+                )

                 // Delete device as notifications might have been disabled here
                 _ = nc.deviceRepo.Delete(nc, device.APNSToken)
             } else {
                 _ = nc.statsd.Incr("apns.notification.sent", []string{}, 1)
-                nc.logger.WithFields(logrus.Fields{
-                    "account#username": account.NormalizedUsername(),
-                    "token":            device.APNSToken,
-                }).Info("sent notification")
+                nc.logger.Info("sent notification",
+                    zap.Int64("account#id", id),
+                    zap.String("account#username", account.NormalizedUsername()),
+                    zap.String("device#token", device.APNSToken),
+                )
             }
         }
     }
@@ -357,9 +355,10 @@ func (nc *notificationsConsumer) Consume(delivery rmq.Delivery) {
     ev := fmt.Sprintf("Sent notification to /u/%s (x%d)", account.Username, msgs.Count)
     _ = nc.statsd.SimpleEvent(ev, "")

-    nc.logger.WithFields(logrus.Fields{
-        "account#username": account.NormalizedUsername(),
-    }).Debug("finishing job")
+    nc.logger.Debug("finishing job",
+        zap.Int64("account#id", id),
+        zap.String("account#username", account.NormalizedUsername()),
+    )
 }

 func (nc *notificationsConsumer) deleteAccount(account domain.Account) error {


@ -10,7 +10,7 @@ import (
"github.com/adjust/rmq/v4" "github.com/adjust/rmq/v4"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/jackc/pgx/v4/pgxpool" "github.com/jackc/pgx/v4/pgxpool"
"github.com/sirupsen/logrus" "go.uber.org/zap"
"github.com/christianselig/apollo-backend/internal/domain" "github.com/christianselig/apollo-backend/internal/domain"
"github.com/christianselig/apollo-backend/internal/reddit" "github.com/christianselig/apollo-backend/internal/reddit"
@ -20,7 +20,7 @@ import (
type stuckNotificationsWorker struct { type stuckNotificationsWorker struct {
context.Context context.Context
logger *logrus.Logger logger *zap.Logger
statsd *statsd.Client statsd *statsd.Client
db *pgxpool.Pool db *pgxpool.Pool
redis *redis.Client redis *redis.Client
@ -32,7 +32,7 @@ type stuckNotificationsWorker struct {
accountRepo domain.AccountRepository accountRepo domain.AccountRepository
} }
func NewStuckNotificationsWorker(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker { func NewStuckNotificationsWorker(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker {
reddit := reddit.NewClient( reddit := reddit.NewClient(
os.Getenv("REDDIT_CLIENT_ID"), os.Getenv("REDDIT_CLIENT_ID"),
os.Getenv("REDDIT_CLIENT_SECRET"), os.Getenv("REDDIT_CLIENT_SECRET"),
@ -61,9 +61,7 @@ func (snw *stuckNotificationsWorker) Start() error {
return err return err
} }
snw.logger.WithFields(logrus.Fields{ snw.logger.Info("starting up stuck notifications worker", zap.Int("consumers", snw.consumers))
"numConsumers": snw.consumers,
}).Info("starting up stuck notifications worker")
prefetchLimit := int64(snw.consumers * 2) prefetchLimit := int64(snw.consumers * 2)
@ -102,69 +100,67 @@ func NewStuckNotificationsConsumer(snw *stuckNotificationsWorker, tag int) *stuc
} }
func (snc *stuckNotificationsConsumer) Consume(delivery rmq.Delivery) { func (snc *stuckNotificationsConsumer) Consume(delivery rmq.Delivery) {
snc.logger.WithFields(logrus.Fields{
"account#id": delivery.Payload(),
}).Debug("starting job")
id, err := strconv.ParseInt(delivery.Payload(), 10, 64) id, err := strconv.ParseInt(delivery.Payload(), 10, 64)
if err != nil { if err != nil {
snc.logger.WithFields(logrus.Fields{ snc.logger.Error("failed to parse account id from payload", zap.Error(err), zap.String("payload", delivery.Payload()))
"account#id": delivery.Payload(),
"err": err,
}).Error("failed to parse account ID")
_ = delivery.Reject() _ = delivery.Reject()
return return
} }
snc.logger.Debug("starting job", zap.Int64("account#id", id))
defer func() { _ = delivery.Ack() }() defer func() { _ = delivery.Ack() }()
account, err := snc.accountRepo.GetByID(snc, id) account, err := snc.accountRepo.GetByID(snc, id)
if err != nil { if err != nil {
snc.logger.WithFields(logrus.Fields{ snc.logger.Error("failed to fetch account from database", zap.Error(err), zap.Int64("account#id", id))
"err": err,
}).Error("failed to fetch account from database")
return return
} }
if account.LastMessageID == "" { if account.LastMessageID == "" {
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("account has no messages, bailing early",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
}).Debug("account has no messages, returning") zap.String("account#username", account.NormalizedUsername()),
)
return return
} }
rac := snc.reddit.NewAuthenticatedClient(account.AccountID, account.RefreshToken, account.AccessToken) rac := snc.reddit.NewAuthenticatedClient(account.AccountID, account.RefreshToken, account.AccessToken)
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("fetching last thing",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
"thing#id": account.LastMessageID, zap.String("account#username", account.NormalizedUsername()),
}).Debug("fetching last thing") )
kind := account.LastMessageID[:2] kind := account.LastMessageID[:2]
var things *reddit.ListingResponse var things *reddit.ListingResponse
if kind == "t4" { if kind == "t4" {
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("checking last thing via inbox",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
"thing#id": account.LastMessageID, zap.String("account#username", account.NormalizedUsername()),
}).Debug("checking last thing via inbox") )
things, err = rac.MessageInbox(snc) things, err = rac.MessageInbox(snc)
if err != nil { if err != nil {
if err != reddit.ErrRateLimited { if err != reddit.ErrRateLimited {
snc.logger.WithFields(logrus.Fields{ snc.logger.Error("failed to fetch last thing via inbox",
"err": err, zap.Error(err),
}).Error("failed to fetch last thing via inbox") zap.Int64("account#id", id),
zap.String("account#username", account.NormalizedUsername()),
)
} }
return return
} }
} else { } else {
things, err = rac.AboutInfo(snc, account.LastMessageID) things, err = rac.AboutInfo(snc, account.LastMessageID)
if err != nil { if err != nil {
snc.logger.WithFields(logrus.Fields{ snc.logger.Error("failed to fetch last thing",
"err": err, zap.Error(err),
}).Error("failed to fetch last thing") zap.Int64("account#id", id),
zap.String("account#username", account.NormalizedUsername()),
)
return return
} }
} }
@ -185,9 +181,11 @@ func (snc *stuckNotificationsConsumer) Consume(delivery rmq.Delivery) {
sthings, err := rac.MessageInbox(snc) sthings, err := rac.MessageInbox(snc)
if err != nil { if err != nil {
snc.logger.WithFields(logrus.Fields{ snc.logger.Error("failed to check inbox",
"err": err, zap.Error(err),
}).Error("failed to check inbox") zap.Int64("account#id", id),
zap.String("account#username", account.NormalizedUsername()),
)
return return
} }
@ -199,52 +197,59 @@ func (snc *stuckNotificationsConsumer) Consume(delivery rmq.Delivery) {
} }
if !found { if !found {
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("thing exists, but not in inbox, marking as deleted",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
"thing#id": account.LastMessageID, zap.String("account#username", account.NormalizedUsername()),
}).Debug("thing exists, but not in inbox, marking as deleted") zap.String("thing#id", account.LastMessageID),
)
break break
} }
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("thing exists, bailing early",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
"thing#id": account.LastMessageID, zap.String("account#username", account.NormalizedUsername()),
}).Debug("thing exists, returning") zap.String("thing#id", account.LastMessageID),
)
return return
} }
} }
snc.logger.WithFields(logrus.Fields{ snc.logger.Info("thing got deleted, resetting",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
"thing#id": account.LastMessageID, zap.String("account#username", account.NormalizedUsername()),
}).Info("thing got deleted, resetting") zap.String("thing#id", account.LastMessageID),
)
if kind != "t4" { if kind != "t4" {
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("getting message inbox to find last good thing",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
}).Debug("getting message inbox to determine last good thing") zap.String("account#username", account.NormalizedUsername()),
)
things, err = rac.MessageInbox(snc) things, err = rac.MessageInbox(snc)
if err != nil { if err != nil {
snc.logger.WithFields(logrus.Fields{ snc.logger.Error("failed to check inbox",
"account#username": account.NormalizedUsername(), zap.Error(err),
"err": err, zap.Int64("account#id", id),
}).Error("failed to get message inbox") zap.String("account#username", account.NormalizedUsername()),
)
return return
} }
} }
account.LastMessageID = "" account.LastMessageID = ""
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("calculating last good thing",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
}).Debug("calculating last good thing") zap.String("account#username", account.NormalizedUsername()),
)
for _, thing := range things.Children { for _, thing := range things.Children {
if thing.IsDeleted() { if thing.IsDeleted() {
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("thing got deleted, checking next",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
"thing#id": thing.FullName(), zap.String("account#username", account.NormalizedUsername()),
}).Debug("thing deleted, next") zap.String("thing#id", thing.FullName()),
)
continue continue
} }
@ -252,15 +257,17 @@ func (snc *stuckNotificationsConsumer) Consume(delivery rmq.Delivery) {
break break
} }
snc.logger.WithFields(logrus.Fields{ snc.logger.Debug("updating last good thing",
"account#username": account.NormalizedUsername(), zap.Int64("account#id", id),
"thing#id": account.LastMessageID, zap.String("account#username", account.NormalizedUsername()),
}).Debug("updating last good thing") zap.String("thing#id", account.LastMessageID),
)
if err := snc.accountRepo.Update(snc, &account); err != nil { if err := snc.accountRepo.Update(snc, &account); err != nil {
snc.logger.WithFields(logrus.Fields{ snc.logger.Error("failed to update account's last message id",
"account#username": account.NormalizedUsername(), zap.Error(err),
"err": err, zap.Int64("account#id", id),
}).Error("failed to update account's message id") zap.String("account#username", account.NormalizedUsername()),
)
} }
} }
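
For reference, the conversion applied throughout these workers follows one mechanical rule: a logrus.Fields map plus a chained level call becomes a single zap call that takes the message first and strongly typed fields after it. A minimal, self-contained sketch of that mapping (the id and error values below are made up for illustration, not taken from the repository):

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	// zap.NewDevelopment returns a console-friendly logger; zap.NewProduction
	// emits JSON. Both return the *zap.Logger type the workers now accept.
	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	defer func() { _ = logger.Sync() }()

	id := int64(42)                         // stand-in for a parsed account id
	fetchErr := errors.New("row not found") // stand-in for a repository error

	// logrus equivalent (before this commit):
	//   logger.WithFields(logrus.Fields{
	//       "account#id": id,
	//       "err":        fetchErr,
	//   }).Error("failed to fetch account from database")
	logger.Error("failed to fetch account from database",
		zap.Error(fetchErr),
		zap.Int64("account#id", id),
	)
}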


@ -16,7 +16,7 @@ import (
"github.com/sideshow/apns2" "github.com/sideshow/apns2"
"github.com/sideshow/apns2/payload" "github.com/sideshow/apns2/payload"
"github.com/sideshow/apns2/token" "github.com/sideshow/apns2/token"
"github.com/sirupsen/logrus" "go.uber.org/zap"
"github.com/christianselig/apollo-backend/internal/domain" "github.com/christianselig/apollo-backend/internal/domain"
"github.com/christianselig/apollo-backend/internal/reddit" "github.com/christianselig/apollo-backend/internal/reddit"
@ -26,7 +26,7 @@ import (
type subredditsWorker struct { type subredditsWorker struct {
context.Context context.Context
logger *logrus.Logger logger *zap.Logger
statsd *statsd.Client statsd *statsd.Client
db *pgxpool.Pool db *pgxpool.Pool
redis *redis.Client redis *redis.Client
@ -47,7 +47,7 @@ const (
subredditNotificationBodyFormat = "r/%s: \u201c%s\u201d" subredditNotificationBodyFormat = "r/%s: \u201c%s\u201d"
) )
func NewSubredditsWorker(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker { func NewSubredditsWorker(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker {
reddit := reddit.NewClient( reddit := reddit.NewClient(
os.Getenv("REDDIT_CLIENT_ID"), os.Getenv("REDDIT_CLIENT_ID"),
os.Getenv("REDDIT_CLIENT_SECRET"), os.Getenv("REDDIT_CLIENT_SECRET"),
@ -94,9 +94,7 @@ func (sw *subredditsWorker) Start() error {
return err return err
} }
sw.logger.WithFields(logrus.Fields{ sw.logger.Info("starting up subreddits worker", zap.Int("consumers", sw.consumers))
"numConsumers": sw.consumers,
}).Info("starting up subreddits worker")
prefetchLimit := int64(sw.consumers * 2) prefetchLimit := int64(sw.consumers * 2)
@ -140,44 +138,38 @@ func NewSubredditsConsumer(sw *subredditsWorker, tag int) *subredditsConsumer {
} }
func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) { func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
sc.logger.WithFields(logrus.Fields{
"subreddit#id": delivery.Payload(),
}).Debug("starting job")
id, err := strconv.ParseInt(delivery.Payload(), 10, 64) id, err := strconv.ParseInt(delivery.Payload(), 10, 64)
if err != nil { if err != nil {
sc.logger.WithFields(logrus.Fields{ sc.logger.Error("failed to parse subreddit id from payload", zap.Error(err), zap.String("payload", delivery.Payload()))
"subreddit#id": delivery.Payload(),
"err": err,
}).Error("failed to parse subreddit ID")
_ = delivery.Reject() _ = delivery.Reject()
return return
} }
sc.logger.Debug("starting job", zap.Int64("subreddit#id", id))
defer func() { _ = delivery.Ack() }() defer func() { _ = delivery.Ack() }()
subreddit, err := sc.subredditRepo.GetByID(sc, id) subreddit, err := sc.subredditRepo.GetByID(sc, id)
if err != nil { if err != nil {
sc.logger.WithFields(logrus.Fields{ sc.logger.Error("failed to fetch subreddit from database", zap.Error(err), zap.Int64("subreddit#id", id))
"err": err,
}).Error("failed to fetch subreddit from database")
return return
} }
watchers, err := sc.watcherRepo.GetBySubredditID(sc, subreddit.ID) watchers, err := sc.watcherRepo.GetBySubredditID(sc, subreddit.ID)
if err != nil { if err != nil {
sc.logger.WithFields(logrus.Fields{ sc.logger.Error("failed to fetch watchers from database",
"subreddit#id": subreddit.ID, zap.Error(err),
"err": err, zap.Int64("subreddit#id", id),
}).Error("failed to fetch watchers from database") zap.String("subreddit#name", subreddit.NormalizedName()),
)
return return
} }
if len(watchers) == 0 { if len(watchers) == 0 {
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("no watchers for subreddit, bailing early",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
}).Debug("no watchers for subreddit, finishing job") zap.String("subreddit#name", subreddit.NormalizedName()),
)
return return
} }
@ -188,17 +180,17 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
seenPosts := map[string]bool{} seenPosts := map[string]bool{}
// Load 500 newest posts // Load 500 newest posts
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("loading up to 500 new posts",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Debug("loading up to 500 new posts") )
for page := 0; page < 5; page++ { for page := 0; page < 5; page++ {
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("loading new posts",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"page": page, zap.Int("page", page),
}).Debug("loading new posts") )
i := rand.Intn(len(watchers)) i := rand.Intn(len(watchers))
watcher := watchers[i] watcher := watchers[i]
@ -213,19 +205,21 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
) )
if err != nil { if err != nil {
sc.logger.WithFields(logrus.Fields{ sc.logger.Error("failed to fetch new posts",
"subreddit#id": subreddit.ID, zap.Error(err),
"err": err, zap.Int64("subreddit#id", id),
}).Error("failed to fetch new posts") zap.String("subreddit#name", subreddit.NormalizedName()),
zap.Int("page", page),
)
continue continue
} }
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("loaded new posts",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"count": sps.Count, zap.Int("page", page),
"page": page, zap.Int("count", sps.Count),
}).Debug("loaded new posts for page") )
// If it's empty, we're done // If it's empty, we're done
if sps.Count == 0 { if sps.Count == 0 {
@ -250,20 +244,20 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
} }
if finished { if finished {
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("reached date threshold",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"page": page, zap.Int("page", page),
}).Debug("reached date threshold") )
break break
} }
} }
// Load hot posts // Load hot posts
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("loading hot posts",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Debug("loading hot posts") )
{ {
i := rand.Intn(len(watchers)) i := rand.Intn(len(watchers))
watcher := watchers[i] watcher := watchers[i]
@ -276,16 +270,17 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
) )
if err != nil { if err != nil {
sc.logger.WithFields(logrus.Fields{ sc.logger.Error("failed to fetch hot posts",
"subreddit#id": subreddit.ID, zap.Error(err),
"err": err, zap.Int64("subreddit#id", id),
}).Error("failed to fetch hot posts") zap.String("subreddit#name", subreddit.NormalizedName()),
)
} else { } else {
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("loaded hot posts",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"count": sps.Count, zap.Int("count", sps.Count),
}).Debug("loaded hot posts") )
for _, post := range sps.Children { for _, post := range sps.Children {
if post.CreatedAt.Before(threshold) { if post.CreatedAt.Before(threshold) {
@ -299,11 +294,11 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
} }
} }
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("checking posts for watcher hits",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"count": len(posts), zap.Int("count", len(posts)),
}).Debug("checking posts for hits") )
for _, post := range posts { for _, post := range posts {
lowcaseAuthor := strings.ToLower(post.Author) lowcaseAuthor := strings.ToLower(post.Author)
lowcaseTitle := strings.ToLower(post.Title) lowcaseTitle := strings.ToLower(post.Title)
@ -344,31 +339,30 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
notified, _ := sc.redis.Get(sc, lockKey).Bool() notified, _ := sc.redis.Get(sc, lockKey).Bool()
if notified { if notified {
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("already notified, skipping",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"watcher#id": watcher.ID, zap.Int64("watcher#id", watcher.ID),
"post#id": post.ID, zap.String("post#id", post.ID),
}).Debug("already notified, skipping") )
continue continue
} }
if err := sc.watcherRepo.IncrementHits(sc, watcher.ID); err != nil { if err := sc.watcherRepo.IncrementHits(sc, watcher.ID); err != nil {
sc.logger.WithFields(logrus.Fields{ sc.logger.Error("could not increment hits",
"subreddit#id": subreddit.ID, zap.Error(err),
"watcher#id": watcher.ID, zap.Int64("subreddit#id", id),
"err": err, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Error("could not increment hits") zap.Int64("watcher#id", watcher.ID),
)
return return
} }
sc.logger.Debug("got a hit",
sc.logger.WithFields(logrus.Fields{ zap.Int64("subreddit#id", id),
"subreddit#id": subreddit.ID, zap.String("subreddit#name", subreddit.NormalizedName()),
"subreddit#name": subreddit.Name, zap.Int64("watcher#id", watcher.ID),
"watcher#id": watcher.ID, zap.String("post#id", post.ID),
"post#id": post.ID, )
}).Debug("got a hit")
sc.redis.SetEX(sc, lockKey, true, 24*time.Hour) sc.redis.SetEX(sc, lockKey, true, 24*time.Hour)
notifs = append(notifs, watcher) notifs = append(notifs, watcher)
@ -377,13 +371,12 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
if len(notifs) == 0 { if len(notifs) == 0 {
continue continue
} }
sc.logger.Debug("got hits for post",
sc.logger.WithFields(logrus.Fields{ zap.Int64("subreddit#id", id),
"subreddit#id": subreddit.ID, zap.String("subreddit#name", subreddit.NormalizedName()),
"subreddit#name": subreddit.Name, zap.String("post#id", post.ID),
"post#id": post.ID, zap.Int("count", len(notifs)),
"count": len(notifs), )
}).Debug("got hits for post")
payload := payloadFromPost(post) payload := payloadFromPost(post)
@ -407,28 +400,31 @@ func (sc *subredditsConsumer) Consume(delivery rmq.Delivery) {
res, err := client.Push(notification) res, err := client.Push(notification)
if err != nil || !res.Sent() { if err != nil || !res.Sent() {
_ = sc.statsd.Incr("apns.notification.errors", []string{}, 1) _ = sc.statsd.Incr("apns.notification.errors", []string{}, 1)
sc.logger.WithFields(logrus.Fields{ sc.logger.Error("failed to send notification",
"subreddit#id": subreddit.ID, zap.Error(err),
"device#id": watcher.Device.ID, zap.Int64("subreddit#id", id),
"err": err, zap.String("subreddit#name", subreddit.NormalizedName()),
"status": res.StatusCode, zap.String("post#id", post.ID),
"reason": res.Reason, zap.String("apns", watcher.Device.APNSToken),
}).Error("failed to send notification") zap.Int("response#status", res.StatusCode),
zap.String("response#reason", res.Reason),
)
} else { } else {
_ = sc.statsd.Incr("apns.notification.sent", []string{}, 1) _ = sc.statsd.Incr("apns.notification.sent", []string{}, 1)
sc.logger.WithFields(logrus.Fields{ sc.logger.Info("sent notification",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"device#id": watcher.Device.ID, zap.String("subreddit#name", subreddit.NormalizedName()),
"device#token": watcher.Device.APNSToken, zap.String("post#id", post.ID),
}).Info("sent notification") zap.String("device#token", watcher.Device.APNSToken),
)
} }
} }
} }
sc.logger.WithFields(logrus.Fields{ sc.logger.Debug("finishing job",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Debug("finishing job") )
} }
func payloadFromPost(post *reddit.Thing) *payload.Payload { func payloadFromPost(post *reddit.Thing) *payload.Payload {
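
The consumer above re-attaches the same subreddit fields on every call. zap can also derive a child logger with With so shared fields are bound once and repeated automatically; a short sketch of that alternative (not what this commit does, and the values are placeholders):

package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	defer func() { _ = logger.Sync() }()

	// Bind the fields that would otherwise be repeated on every line.
	sublog := logger.With(
		zap.Int64("subreddit#id", 123),            // placeholder id
		zap.String("subreddit#name", "apolloapp"), // placeholder name
	)

	// Every call on the child logger carries the bound fields automatically.
	sublog.Debug("loading up to 500 new posts")
	sublog.Debug("loading new posts", zap.Int("page", 0))
	sublog.Debug("finishing job")
}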


@ -15,7 +15,7 @@ import (
"github.com/sideshow/apns2" "github.com/sideshow/apns2"
"github.com/sideshow/apns2/payload" "github.com/sideshow/apns2/payload"
"github.com/sideshow/apns2/token" "github.com/sideshow/apns2/token"
"github.com/sirupsen/logrus" "go.uber.org/zap"
"github.com/christianselig/apollo-backend/internal/domain" "github.com/christianselig/apollo-backend/internal/domain"
"github.com/christianselig/apollo-backend/internal/reddit" "github.com/christianselig/apollo-backend/internal/reddit"
@ -25,7 +25,7 @@ import (
type trendingWorker struct { type trendingWorker struct {
context.Context context.Context
logger *logrus.Logger logger *zap.Logger
statsd *statsd.Client statsd *statsd.Client
redis *redis.Client redis *redis.Client
queue rmq.Connection queue rmq.Connection
@ -42,7 +42,7 @@ type trendingWorker struct {
const trendingNotificationTitleFormat = "🔥 r/%s Trending" const trendingNotificationTitleFormat = "🔥 r/%s Trending"
func NewTrendingWorker(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker { func NewTrendingWorker(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker {
reddit := reddit.NewClient( reddit := reddit.NewClient(
os.Getenv("REDDIT_CLIENT_ID"), os.Getenv("REDDIT_CLIENT_ID"),
os.Getenv("REDDIT_CLIENT_SECRET"), os.Getenv("REDDIT_CLIENT_SECRET"),
@ -88,9 +88,7 @@ func (tw *trendingWorker) Start() error {
return err return err
} }
tw.logger.WithFields(logrus.Fields{ tw.logger.Info("starting up trending subreddits worker", zap.Int("consumers", tw.consumers))
"numConsumers": tw.consumers,
}).Info("starting up trending worker")
prefetchLimit := int64(tw.consumers * 2) prefetchLimit := int64(tw.consumers * 2)
@ -134,44 +132,38 @@ func NewTrendingConsumer(tw *trendingWorker, tag int) *trendingConsumer {
} }
func (tc *trendingConsumer) Consume(delivery rmq.Delivery) { func (tc *trendingConsumer) Consume(delivery rmq.Delivery) {
tc.logger.WithFields(logrus.Fields{
"subreddit#id": delivery.Payload(),
}).Debug("starting job")
id, err := strconv.ParseInt(delivery.Payload(), 10, 64) id, err := strconv.ParseInt(delivery.Payload(), 10, 64)
if err != nil { if err != nil {
tc.logger.WithFields(logrus.Fields{ tc.logger.Error("failed to parse subreddit id from payload", zap.Error(err), zap.String("payload", delivery.Payload()))
"subreddit#id": delivery.Payload(),
"err": err,
}).Error("failed to parse subreddit ID")
_ = delivery.Reject() _ = delivery.Reject()
return return
} }
tc.logger.Debug("starting job", zap.Int64("subreddit#id", id))
defer func() { _ = delivery.Ack() }() defer func() { _ = delivery.Ack() }()
subreddit, err := tc.subredditRepo.GetByID(tc, id) subreddit, err := tc.subredditRepo.GetByID(tc, id)
if err != nil { if err != nil {
tc.logger.WithFields(logrus.Fields{ tc.logger.Error("failed to fetch subreddit from database", zap.Error(err), zap.Int64("subreddit#id", id))
"err": err,
}).Error("failed to fetch subreddit from database")
return return
} }
watchers, err := tc.watcherRepo.GetByTrendingSubredditID(tc, subreddit.ID) watchers, err := tc.watcherRepo.GetByTrendingSubredditID(tc, subreddit.ID)
if err != nil { if err != nil {
tc.logger.WithFields(logrus.Fields{ tc.logger.Error("failed to fetch watchers from database",
"subreddit#id": subreddit.ID, zap.Error(err),
"err": err, zap.Int64("subreddit#id", id),
}).Error("failed to fetch watchers from database") zap.String("subreddit#name", subreddit.NormalizedName()),
)
return return
} }
if len(watchers) == 0 { if len(watchers) == 0 {
tc.logger.WithFields(logrus.Fields{ tc.logger.Debug("no watchers for subreddit, bailing early",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
}).Debug("no watchers for trending, finishing job") zap.String("subreddit#name", subreddit.NormalizedName()),
)
return return
} }
@ -182,39 +174,44 @@ func (tc *trendingConsumer) Consume(delivery rmq.Delivery) {
tps, err := rac.SubredditTop(tc, subreddit.Name, reddit.WithQuery("t", "week")) tps, err := rac.SubredditTop(tc, subreddit.Name, reddit.WithQuery("t", "week"))
if err != nil { if err != nil {
tc.logger.WithFields(logrus.Fields{ tc.logger.Error("failed to fetch week's top posts",
"subreddit#id": subreddit.ID, zap.Error(err),
"subreddit#name": subreddit.Name, zap.Int64("subreddit#id", id),
"err": err, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Error("failed to fetch month's top posts") )
return return
} }
tc.logger.WithFields(logrus.Fields{
"subreddit#id": subreddit.ID, tc.logger.Debug("loaded month's top posts",
"subreddit#name": subreddit.Name, zap.Int64("subreddit#id", id),
"count": tps.Count, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Debug("loaded month's hot posts") zap.Int("count", tps.Count),
)
if tps.Count == 0 { if tps.Count == 0 {
tc.logger.WithFields(logrus.Fields{ tc.logger.Debug("no top posts, bailing early",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
}).Debug("no top posts for subreddit, returning") zap.String("subreddit#name", subreddit.NormalizedName()),
)
return return
} }
if tps.Count < 20 { if tps.Count < 20 {
tc.logger.WithFields(logrus.Fields{ tc.logger.Debug("not enough posts, bailing early",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
}).Debug("not enough posts, returning") zap.String("subreddit#name", subreddit.NormalizedName()),
zap.Int("count", tps.Count),
)
return return
} }
middlePost := tps.Count / 2 middlePost := tps.Count / 2
medianScore := tps.Children[middlePost].Score medianScore := tps.Children[middlePost].Score
tc.logger.WithFields(logrus.Fields{ tc.logger.Debug("calculated median score",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"score": medianScore, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Debug("calculated median score") zap.Int64("score", medianScore),
)
// Grab hot posts and filter out anything that's > 2 days old // Grab hot posts and filter out anything that's > 2 days old
i = rand.Intn(len(watchers)) i = rand.Intn(len(watchers))
@ -222,18 +219,18 @@ func (tc *trendingConsumer) Consume(delivery rmq.Delivery) {
rac = tc.reddit.NewAuthenticatedClient(watcher.Account.AccountID, watcher.Account.RefreshToken, watcher.Account.AccessToken) rac = tc.reddit.NewAuthenticatedClient(watcher.Account.AccountID, watcher.Account.RefreshToken, watcher.Account.AccessToken)
hps, err := rac.SubredditHot(tc, subreddit.Name) hps, err := rac.SubredditHot(tc, subreddit.Name)
if err != nil { if err != nil {
tc.logger.WithFields(logrus.Fields{ tc.logger.Error("failed to fetch hot posts",
"subreddit#id": subreddit.ID, zap.Error(err),
"subreddit#name": subreddit.Name, zap.Int64("subreddit#id", id),
"err": err, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Error("failed to fetch hot posts") )
return return
} }
tc.logger.WithFields(logrus.Fields{ tc.logger.Debug("loaded hot posts",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"count": hps.Count, zap.Int("count", hps.Count),
}).Debug("loaded hot posts") )
// Trending only counts for posts less than 2 days old // Trending only counts for posts less than 2 days old
threshold := time.Now().Add(-24 * time.Hour * 2) threshold := time.Now().Add(-24 * time.Hour * 2)
@ -260,23 +257,24 @@ func (tc *trendingConsumer) Consume(delivery rmq.Delivery) {
notified, _ := tc.redis.Get(tc, lockKey).Bool() notified, _ := tc.redis.Get(tc, lockKey).Bool()
if notified { if notified {
tc.logger.WithFields(logrus.Fields{ tc.logger.Debug("already notified, skipping",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
"watcher#id": watcher.ID, zap.Int64("watcher#id", watcher.ID),
"post#id": post.ID, zap.String("post#id", post.ID),
}).Debug("already notified, skipping") )
continue continue
} }
tc.redis.SetEX(tc, lockKey, true, 48*time.Hour) tc.redis.SetEX(tc, lockKey, true, 48*time.Hour)
if err := tc.watcherRepo.IncrementHits(tc, watcher.ID); err != nil { if err := tc.watcherRepo.IncrementHits(tc, watcher.ID); err != nil {
tc.logger.WithFields(logrus.Fields{ tc.logger.Error("could not increment hits",
"subreddit#id": subreddit.ID, zap.Error(err),
"watcher#id": watcher.ID, zap.Int64("subreddit#id", id),
"err": err, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Error("could not increment hits") zap.Int64("watcher#id", watcher.ID),
)
return return
} }
@ -290,30 +288,31 @@ func (tc *trendingConsumer) Consume(delivery rmq.Delivery) {
res, err := client.Push(notification) res, err := client.Push(notification)
if err != nil || !res.Sent() { if err != nil || !res.Sent() {
_ = tc.statsd.Incr("apns.notification.errors", []string{}, 1) _ = tc.statsd.Incr("apns.notification.errors", []string{}, 1)
tc.logger.WithFields(logrus.Fields{ tc.logger.Error("failed to send notification",
"subreddit#id": subreddit.ID, zap.Error(err),
"post#id": post.ID, zap.Int64("subreddit#id", id),
"device#id": watcher.Device.ID, zap.String("subreddit#name", subreddit.NormalizedName()),
"err": err, zap.String("post#id", post.ID),
"status": res.StatusCode, zap.String("apns", watcher.Device.APNSToken),
"reason": res.Reason, zap.Int("response#status", res.StatusCode),
}).Error("failed to send notification") zap.String("response#reason", res.Reason),
)
} else { } else {
_ = tc.statsd.Incr("apns.notification.sent", []string{}, 1) _ = tc.statsd.Incr("apns.notification.sent", []string{}, 1)
tc.logger.WithFields(logrus.Fields{ tc.logger.Info("sent notification",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"post#id": post.ID, zap.String("subreddit#name", subreddit.NormalizedName()),
"device#id": watcher.Device.ID, zap.String("post#id", post.ID),
"device#token": watcher.Device.APNSToken, zap.String("device#token", watcher.Device.APNSToken),
}).Info("sent notification") )
} }
} }
} }
tc.logger.WithFields(logrus.Fields{ tc.logger.Debug("finishing job",
"subreddit#id": subreddit.ID, zap.Int64("subreddit#id", id),
"subreddit#name": subreddit.Name, zap.String("subreddit#name", subreddit.NormalizedName()),
}).Debug("finishing job") )
} }
func payloadFromTrendingPost(post *reddit.Thing) *payload.Payload { func payloadFromTrendingPost(post *reddit.Thing) *payload.Payload {
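
Because the workers now take a *zap.Logger rather than a *logrus.Logger, call sites that do not care about output (unit tests, one-off scripts) can pass zap.NewNop(), which discards everything and never fails. A small sketch under that assumption:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	// zap.NewNop returns a fully functional *zap.Logger that writes nothing,
	// so it can stand in wherever these constructors expect a logger.
	logger := zap.NewNop()

	// Safe to call at any level; output is simply dropped.
	logger.Info("sent notification", zap.String("post#id", "t3_abc123"))
	logger.Error("failed to fetch hot posts", zap.Error(errors.New("rate limited")))
}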


@ -15,7 +15,7 @@ import (
"github.com/sideshow/apns2" "github.com/sideshow/apns2"
"github.com/sideshow/apns2/payload" "github.com/sideshow/apns2/payload"
"github.com/sideshow/apns2/token" "github.com/sideshow/apns2/token"
"github.com/sirupsen/logrus" "go.uber.org/zap"
"github.com/christianselig/apollo-backend/internal/domain" "github.com/christianselig/apollo-backend/internal/domain"
"github.com/christianselig/apollo-backend/internal/reddit" "github.com/christianselig/apollo-backend/internal/reddit"
@ -25,7 +25,7 @@ import (
type usersWorker struct { type usersWorker struct {
context.Context context.Context
logger *logrus.Logger logger *zap.Logger
statsd *statsd.Client statsd *statsd.Client
db *pgxpool.Pool db *pgxpool.Pool
redis *redis.Client redis *redis.Client
@ -43,7 +43,7 @@ type usersWorker struct {
const userNotificationTitleFormat = "👨\u200d🚀 %s" const userNotificationTitleFormat = "👨\u200d🚀 %s"
func NewUsersWorker(ctx context.Context, logger *logrus.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker { func NewUsersWorker(ctx context.Context, logger *zap.Logger, statsd *statsd.Client, db *pgxpool.Pool, redis *redis.Client, queue rmq.Connection, consumers int) Worker {
reddit := reddit.NewClient( reddit := reddit.NewClient(
os.Getenv("REDDIT_CLIENT_ID"), os.Getenv("REDDIT_CLIENT_ID"),
os.Getenv("REDDIT_CLIENT_SECRET"), os.Getenv("REDDIT_CLIENT_SECRET"),
@ -90,9 +90,7 @@ func (uw *usersWorker) Start() error {
return err return err
} }
uw.logger.WithFields(logrus.Fields{ uw.logger.Info("starting up users worker", zap.Int("consumers", uw.consumers))
"numConsumers": uw.consumers,
}).Info("starting up users worker")
prefetchLimit := int64(uw.consumers * 2) prefetchLimit := int64(uw.consumers * 2)
@ -136,44 +134,38 @@ func NewUsersConsumer(uw *usersWorker, tag int) *usersConsumer {
} }
func (uc *usersConsumer) Consume(delivery rmq.Delivery) { func (uc *usersConsumer) Consume(delivery rmq.Delivery) {
uc.logger.WithFields(logrus.Fields{
"user#id": delivery.Payload(),
}).Debug("starting job")
id, err := strconv.ParseInt(delivery.Payload(), 10, 64) id, err := strconv.ParseInt(delivery.Payload(), 10, 64)
if err != nil { if err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to parse user id from payload", zap.Error(err), zap.String("payload", delivery.Payload()))
"user#id": delivery.Payload(),
"err": err,
}).Error("failed to parse user ID")
_ = delivery.Reject() _ = delivery.Reject()
return return
} }
uc.logger.Debug("starting job", zap.Int64("user#id", id))
defer func() { _ = delivery.Ack() }() defer func() { _ = delivery.Ack() }()
user, err := uc.userRepo.GetByID(uc, id) user, err := uc.userRepo.GetByID(uc, id)
if err != nil { if err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to fetch user from database", zap.Error(err), zap.Int64("user#id", id))
"err": err,
}).Error("failed to fetch user from database")
return return
} }
watchers, err := uc.watcherRepo.GetByUserID(uc, user.ID) watchers, err := uc.watcherRepo.GetByUserID(uc, user.ID)
if err != nil { if err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to fetch watchers from database",
"user#id": user.ID, zap.Error(err),
"err": err, zap.Int64("user#id", id),
}).Error("failed to fetch watchers from database") zap.String("user#name", user.NormalizedName()),
)
return return
} }
if len(watchers) == 0 { if len(watchers) == 0 {
uc.logger.WithFields(logrus.Fields{ uc.logger.Debug("no watchers for user, bailing early",
"user#id": user.ID, zap.Int64("user#id", id),
}).Info("no watchers for user, skipping") zap.String("user#name", user.NormalizedName()),
)
return return
} }
@ -186,41 +178,46 @@ func (uc *usersConsumer) Consume(delivery rmq.Delivery) {
ru, err := rac.UserAbout(uc, user.Name) ru, err := rac.UserAbout(uc, user.Name)
if err != nil { if err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to fetch user details",
"user#id": user.ID, zap.Error(err),
"err": err, zap.Int64("user#id", id),
}).Error("failed to fetch user details") zap.String("user#name", user.NormalizedName()),
)
return return
} }
if !ru.AcceptFollowers { if !ru.AcceptFollowers {
uc.logger.WithFields(logrus.Fields{ uc.logger.Info("user disabled followers, removing",
"user#id": user.ID, zap.Int64("user#id", id),
}).Info("user disabled followers, removing") zap.String("user#name", user.NormalizedName()),
)
if err := uc.watcherRepo.DeleteByTypeAndWatcheeID(uc, domain.UserWatcher, user.ID); err != nil { if err := uc.watcherRepo.DeleteByTypeAndWatcheeID(uc, domain.UserWatcher, user.ID); err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to remove watchers for user who disallows followers",
"user#id": user.ID, zap.Error(err),
"err": err, zap.Int64("user#id", id),
}).Error("failed to delete watchers for user who does not allow followers") zap.String("user#name", user.NormalizedName()),
)
return return
} }
if err := uc.userRepo.Delete(uc, user.ID); err != nil { if err := uc.userRepo.Delete(uc, user.ID); err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to remove user",
"user#id": user.ID, zap.Error(err),
"err": err, zap.Int64("user#id", id),
}).Error("failed to delete user") zap.String("user#name", user.NormalizedName()),
)
return return
} }
} }
posts, err := rac.UserPosts(uc, user.Name) posts, err := rac.UserPosts(uc, user.Name)
if err != nil { if err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to fetch user activity",
"user#id": user.ID, zap.Error(err),
"err": err, zap.Int64("user#id", id),
}).Error("failed to fetch user activity") zap.String("user#name", user.NormalizedName()),
)
return return
} }
@ -261,11 +258,12 @@ func (uc *usersConsumer) Consume(delivery rmq.Delivery) {
for _, watcher := range notifs { for _, watcher := range notifs {
if err := uc.watcherRepo.IncrementHits(uc, watcher.ID); err != nil { if err := uc.watcherRepo.IncrementHits(uc, watcher.ID); err != nil {
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to increment watcher hits",
"user#id": user.ID, zap.Error(err),
"watcher#id": watcher.ID, zap.Int64("user#id", id),
"err": err, zap.String("user#name", user.NormalizedName()),
}).Error("could not increment hits") zap.Int64("watcher#id", watcher.ID),
)
return return
} }
@ -285,28 +283,31 @@ func (uc *usersConsumer) Consume(delivery rmq.Delivery) {
res, err := client.Push(notification) res, err := client.Push(notification)
if err != nil || !res.Sent() { if err != nil || !res.Sent() {
_ = uc.statsd.Incr("apns.notification.errors", []string{}, 1) _ = uc.statsd.Incr("apns.notification.errors", []string{}, 1)
uc.logger.WithFields(logrus.Fields{ uc.logger.Error("failed to send notification",
"user#id": user.ID, zap.Error(err),
"device#id": device.ID, zap.Int64("user#id", id),
"err": err, zap.String("user#name", user.NormalizedName()),
"status": res.StatusCode, zap.String("post#id", post.ID),
"reason": res.Reason, zap.String("apns", watcher.Device.APNSToken),
}).Error("failed to send notification") zap.Int("response#status", res.StatusCode),
zap.String("response#reason", res.Reason),
)
} else { } else {
_ = uc.statsd.Incr("apns.notification.sent", []string{}, 1) _ = uc.statsd.Incr("apns.notification.sent", []string{}, 1)
uc.logger.WithFields(logrus.Fields{ uc.logger.Info("sent notification",
"user#id": user.ID, zap.Int64("user#id", id),
"device#id": device.ID, zap.String("user#name", user.NormalizedName()),
"device#token": device.APNSToken, zap.String("post#id", post.ID),
}).Info("sent notification") zap.String("device#token", watcher.Device.APNSToken),
)
} }
} }
} }
uc.logger.WithFields(logrus.Fields{ uc.logger.Debug("finishing job",
"user#id": user.ID, zap.Int64("user#id", id),
"user#name": user.Name, zap.String("user#name", user.NormalizedName()),
}).Debug("finishing job") )
} }
func payloadFromUserPost(post *reddit.Thing) *payload.Payload { func payloadFromUserPost(post *reddit.Thing) *payload.Payload {


@ -8,12 +8,12 @@ import (
"github.com/adjust/rmq/v4" "github.com/adjust/rmq/v4"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/jackc/pgx/v4/pgxpool" "github.com/jackc/pgx/v4/pgxpool"
"github.com/sirupsen/logrus" "go.uber.org/zap"
) )
const pollDuration = 100 * time.Millisecond const pollDuration = 100 * time.Millisecond
type NewWorkerFn func(context.Context, *logrus.Logger, *statsd.Client, *pgxpool.Pool, *redis.Client, rmq.Connection, int) Worker type NewWorkerFn func(context.Context, *zap.Logger, *statsd.Client, *pgxpool.Pool, *redis.Client, rmq.Connection, int) Worker
type Worker interface { type Worker interface {
Start() error Start() error
Stop() Stop()
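
The NewWorkerFn signature above means one *zap.Logger is built once and threaded through every worker constructor. A sketch of the construction side, under the assumption that the command entrypoint picks an encoder from the environment (the ENV check is illustrative, not taken from the repository):

package main

import (
	"os"

	"go.uber.org/zap"
)

func main() {
	// Production config emits structured JSON; development config is easier
	// to read locally. The selection logic here is an assumption.
	var (
		logger *zap.Logger
		err    error
	)
	if os.Getenv("ENV") == "development" {
		logger, err = zap.NewDevelopment()
	} else {
		logger, err = zap.NewProduction()
	}
	if err != nil {
		panic(err)
	}
	defer func() { _ = logger.Sync() }()

	// This is the value handed to every NewWorkerFn-shaped constructor in the
	// diff (NewStuckNotificationsWorker, NewSubredditsWorker, NewTrendingWorker,
	// NewUsersWorker) as its second argument.
	logger.Info("queue worker logger initialised")
}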