commit b5d52f55aa
André Medeiros, 2023-06-08 20:49:45 +02:00 (committed by GitHub)
12 changed files with 271 additions and 10 deletions


@@ -5,6 +5,7 @@ jobs:
runs-on: ubuntu-latest
env:
DATABASE_URL: postgres://postgres:postgres@localhost/apollo_test
REDIS_URL: redis://localhost:6379
services:
postgres:
image: postgres
@@ -18,12 +19,20 @@ jobs:
--health-retries 5
ports:
- 5432:5432
redis:
image: redis
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 6379:6379
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: 1.19.3
go-version: 1.20.2
- uses: golangci/golangci-lint-action@v3
- run: psql -f docs/schema.sql $DATABASE_URL
- run: go test ./... -v -race -timeout 5s


@@ -17,11 +17,5 @@ linters:
- thelper # detects golang test helpers without t.Helper() call and checks consistency of test helpers
- unconvert # removes unnecessary type conversions
- unparam # removes unused function parameters
fast: true
issues:
exclude-rules:
# False positive: https://github.com/kunwardeep/paralleltest/issues/8.
- linters:
- paralleltest
text: "does not use range value in test Run"
fast: true

.idea/.gitignore (vendored, new file, 8 additions)

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

.idea/apollo-backend.iml (new file, 9 additions)

@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
<component name="Go" enabled="true" />
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>


@@ -0,0 +1,5 @@
<component name="ProjectCodeStyleConfiguration">
<state>
<option name="PREFERRED_PROJECT_CODE_STYLE" value="Default" />
</state>
</component>

.idea/modules.xml (new file, 8 additions)

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/apollo-backend.iml" filepath="$PROJECT_DIR$/.idea/apollo-backend.iml" />
</modules>
</component>
</project>

.idea/vcs.xml (new file, 6 additions)

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>


@@ -1,8 +1,9 @@
BREW_PREFIX ?= $(shell brew --prefix)
DATABASE_URL ?= "postgres://$(USER)@localhost/apollo_test?sslmode=disable"
REDIS_URL ?= "redis://localhost:6379"
test:
@DATABASE_URL=$(DATABASE_URL) go test -race -timeout 1s ./...
@DATABASE_URL=$(DATABASE_URL) REDIS_URL=$(REDIS_URL) go test -race -timeout 1s ./...
test-setup: $(BREW_PREFIX)/bin/migrate
migrate -path migrations/ -database $(DATABASE_URL) up


@@ -0,0 +1,92 @@
package distributedlock

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	"github.com/go-redis/redis/v8"
)

const (
	lockTopicFormat = "pubsub:locks:%s"

	// Compare-and-delete: the key is only deleted if it still holds our uid,
	// and a message is published so that waiters can retry acquisition.
	lockReleaseScript = `
if redis.call("GET", KEYS[1]) == ARGV[1] then
	redis.call("DEL", KEYS[1])
	redis.call("PUBLISH", KEYS[2], KEYS[1])
	return 1
else
	return 0
end
`
)

type DistributedLock struct {
	client  *redis.Client
	sha     string
	timeout time.Duration
}

func New(client *redis.Client, timeout time.Duration) (*DistributedLock, error) {
	sha, err := client.ScriptLoad(context.Background(), lockReleaseScript).Result()
	if err != nil {
		return nil, err
	}

	return &DistributedLock{
		client:  client,
		sha:     sha,
		timeout: timeout,
	}, nil
}

func (d *DistributedLock) setLock(ctx context.Context, key string, uid string) error {
	result, err := d.client.SetNX(ctx, key, uid, d.timeout).Result()
	if err != nil {
		return err
	}
	if !result {
		return ErrLockAlreadyAcquired
	}
	return nil
}

func (d *DistributedLock) AcquireLock(ctx context.Context, key string) (*Lock, error) {
	uid := generateUniqueID()
	if err := d.setLock(ctx, key, uid); err != nil {
		return nil, err
	}
	return NewLock(d, key, uid), nil
}

// WaitAcquireLock tries to acquire the lock once, then subscribes to the lock's
// release channel and retries a single time when a release is published. If the
// lock is released between the first attempt and the subscription, the
// notification is missed and the call falls through to the timeout.
func (d *DistributedLock) WaitAcquireLock(ctx context.Context, key string, timeout time.Duration) (*Lock, error) {
	uid := generateUniqueID()
	if err := d.setLock(ctx, key, uid); err == nil {
		return NewLock(d, key, uid), nil
	}

	ch := fmt.Sprintf(lockTopicFormat, key)
	pubsub := d.client.Subscribe(ctx, ch)
	defer func() { _ = pubsub.Close() }()

	select {
	case <-time.After(timeout):
		return nil, ErrLockAcquisitionTimeout
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-pubsub.Channel():
		err := d.setLock(ctx, key, uid)
		if err != nil {
			return nil, err
		}
		return NewLock(d, key, uid), nil
	}
}

func generateUniqueID() string {
	return fmt.Sprintf("%d-%d", time.Now().UnixNano(), rand.Int63())
}
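
Taken together, the new package exposes a small surface: New loads the release script and wraps a Redis client, AcquireLock and WaitAcquireLock hand back a *Lock, and Lock.Release gives it up. A minimal usage sketch, not part of the commit; it assumes a Redis instance reachable at localhost:6379, and the key name and timeouts are illustrative:

package main

import (
	"context"
	"log"
	"time"

	"github.com/christianselig/apollo-backend/internal/distributedlock"
	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Assumes a local Redis instance; adjust the address as needed.
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer func() { _ = client.Close() }()

	// Locks auto-expire after 30 seconds if never released.
	locker, err := distributedlock.New(client, 30*time.Second)
	if err != nil {
		log.Fatal(err)
	}

	// Block up to 5 seconds waiting for the lock to become available.
	// "jobs:refresh-accounts" is an example key, not one used by the app.
	lock, err := locker.WaitAcquireLock(ctx, "jobs:refresh-accounts", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = lock.Release(ctx) }()

	// ... do the work that must not run concurrently ...
}

Note that the timeout passed to New doubles as the key's TTL in SetNX, so a crashed holder's lock eventually expires on its own.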


@@ -0,0 +1,84 @@
package distributedlock_test

import (
	"context"
	"fmt"
	"math/rand"
	"os"
	"testing"
	"time"

	"github.com/christianselig/apollo-backend/internal/distributedlock"
	"github.com/go-redis/redis/v8"
	"github.com/stretchr/testify/assert"
)

func NewRedisClient(t *testing.T, ctx context.Context) (*redis.Client, func()) {
	t.Helper()

	opt, err := redis.ParseURL(os.Getenv("REDIS_URL"))
	if err != nil {
		panic(err)
	}

	client := redis.NewClient(opt)
	if err := client.Ping(ctx).Err(); err != nil {
		panic(err)
	}

	return client, func() {
		_ = client.Close()
	}
}

func TestDistributedLock_AcquireLock(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	key := fmt.Sprintf("key:%d-%d", time.Now().UnixNano(), rand.Int63())

	client, closer := NewRedisClient(t, ctx)
	defer closer()

	d, err := distributedlock.New(client, 10*time.Second)
	assert.NoError(t, err)

	lock, err := d.AcquireLock(ctx, key)
	assert.NoError(t, err)

	_, err = d.AcquireLock(ctx, key)
	assert.Equal(t, distributedlock.ErrLockAlreadyAcquired, err)

	err = lock.Release(ctx)
	assert.NoError(t, err)

	_, err = d.AcquireLock(ctx, key)
	assert.NoError(t, err)
}

func TestDistributedLock_WaitAcquireLock(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	key := fmt.Sprintf("key:%d-%d", time.Now().UnixNano(), rand.Int63())

	client, closer := NewRedisClient(t, ctx)
	defer closer()

	d, err := distributedlock.New(client, 10*time.Second)
	assert.NoError(t, err)

	lock, err := d.AcquireLock(ctx, key)
	assert.NoError(t, err)

	// Release the lock shortly after, so WaitAcquireLock below gets woken up.
	go func(l *distributedlock.Lock) {
		<-time.After(100 * time.Millisecond)
		_ = l.Release(ctx)
	}(lock)

	lock, err = d.WaitAcquireLock(ctx, key, 5*time.Second)
	assert.NoError(t, err)
	assert.NotNil(t, lock)
}


@@ -0,0 +1,9 @@
package distributedlock

import "errors"

var (
	ErrLockAcquisitionTimeout = errors.New("timed out acquiring lock")
	ErrLockAlreadyAcquired    = errors.New("lock already acquired")
	ErrLockExpired            = errors.New("releasing an expired lock")
)


@@ -0,0 +1,36 @@
package distributedlock

import (
	"context"
	"fmt"
)

type Lock struct {
	distributedLock *DistributedLock
	key             string
	uid             string
}

func NewLock(distributedLock *DistributedLock, key string, uid string) *Lock {
	return &Lock{
		distributedLock: distributedLock,
		key:             key,
		uid:             uid,
	}
}

// Release runs the compare-and-delete script: the key is removed and waiters are
// notified only if it still holds this lock's uid. A script result of 0 means the
// lock expired (and may have been re-acquired by someone else) in the meantime.
func (l *Lock) Release(ctx context.Context) error {
	ch := fmt.Sprintf(lockTopicFormat, l.key)

	result, err := l.distributedLock.client.EvalSha(ctx, l.distributedLock.sha, []string{l.key, ch}, l.uid).Result()
	if err != nil {
		return err
	}

	if result == int64(0) {
		return ErrLockExpired
	}

	return nil
}
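
Because Release only deletes the key when the stored uid still matches, a handle that has outlived its timeout cannot clobber a lock that another caller has since acquired; it gets ErrLockExpired instead. A hypothetical test sketch of that behaviour, not part of the commit; it reuses the NewRedisClient helper from the test file above, and the key name and the deliberately short 100ms timeout are illustrative:

package distributedlock_test

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/christianselig/apollo-backend/internal/distributedlock"
)

// Hypothetical sketch: a handle held past its timeout must not release a key
// that a newer holder now owns.
func TestDistributedLock_ReleaseExpired(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	client, closer := NewRedisClient(t, ctx) // helper from the test file above
	defer closer()

	locker, err := distributedlock.New(client, 100*time.Millisecond)
	if err != nil {
		t.Fatal(err)
	}

	stale, err := locker.AcquireLock(ctx, "example:expired-release")
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(200 * time.Millisecond) // let the key's TTL lapse in Redis

	fresh, err := locker.AcquireLock(ctx, "example:expired-release")
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = fresh.Release(ctx) }()

	// The stale handle's uid no longer matches the stored value, so the
	// release script returns 0 and Release reports ErrLockExpired.
	if err := stale.Release(ctx); !errors.Is(err, distributedlock.ErrLockExpired) {
		t.Fatalf("expected ErrLockExpired, got %v", err)
	}
}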