bc0977f1c9
Follow #31908. The main refactor is that the context returned by `Lock` has been removed.

In the old code, the returned context was a way to let callers know that they had lost the lock. But in most cases, callers shouldn't cancel what they are doing even if they have lost the lock, and the design tended to confuse developers and lead to incorrect usage. See the discussion history: https://github.com/go-gitea/gitea/pull/31813#discussion_r1732041513 and https://github.com/go-gitea/gitea/pull/31813#discussion_r1734078998

It's a breaking change, but since the new module hasn't been used yet, I think it's OK to not add the `pr/breaking` label.

## Design principles

It's almost copied from #31908, but with some changes.

### Use spinlock even in memory implementation (unchanged)

In actual use cases, users may cancel requests. `sync.Mutex` will block the goroutine until the lock is acquired, even if the request has been canceled. A spinlock is more suitable for this scenario since it makes it possible to give up the lock acquisition. Although a spinlock consumes more CPU, I think that's acceptable in most cases.

### Do not expose the mutex to callers (unchanged)

If we expose the mutex to callers, it's possible for callers to reuse it, which causes more complexity. For example:

```go
lock := GetLocker(key)
lock.Lock()
// ...
// even if the lock is unlocked, we cannot GC the lock,
// since the caller may still use it again.
lock.Unlock()
lock.Lock()
// ...
lock.Unlock()
// callers have to GC the lock manually.
RemoveLocker(key)
```

That's what https://github.com/go-gitea/gitea/pull/31813#discussion_r1721200549 is about.

In this PR, we only expose `ReleaseFunc` to callers. Callers just call the `ReleaseFunc` to release the lock and do not need to care about the lock's lifecycle.

```go
release, err := locker.Lock(ctx, key)
if err != nil {
	return err
}
// ...
release()

// if callers want to lock again, they have to re-acquire the lock.
release, err = locker.Lock(ctx, key)
// ...
```

In this way, it's also much easier for the redis implementation to extend the mutex automatically, so callers never need to care about the lock's lifecycle. See also https://github.com/go-gitea/gitea/pull/31813#discussion_r1722659743

### Use "release" instead of "unlock" (unchanged)

"Unlock" means "unlock an acquired lock", so it's not acceptable to call "unlock" when the lock was never acquired, or to call it multiple times. That adds complexity for callers, who have to decide whether calling "unlock" is allowed.

So we use "release" instead to make it clear: whether or not the lock was acquired, callers can always call "release", and it's also safe to call "release" multiple times. That said, the code does not expect callers to skip calling "release" after acquiring the lock; forgetting to call it causes a resource leak. Making "release" always safe to call without extra checks is exactly what helps callers avoid forgetting it: they can simply `defer release()` unconditionally.

### Acquired locks could be lost, but the callers shouldn't stop

Unlike `sync.Mutex`, which stays locked from acquisition until `Unlock` is called, a distributed lock can be lost after it has been acquired. For example, a caller has acquired the lock and holds it for a long time because auto-extending is working on the redis side; then it loses the connection to the redis server, and extending the lock becomes impossible. In #31908, the returned context would be canceled to stop the operation, but that's not safe: many operations are not revertible, and if they are interrupted midway, the instance ends up corrupted. So `Lock` won't return a `ctx` anymore in this PR.
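To make this concrete, here is a rough sketch of an auto-extend loop that follows this principle. It is an assumption about the shape of such a loop built on `redsync` (names like `autoExtend` and the interval are illustrative), not the actual implementation: when extending fails, the loop simply stops, and the caller's work is never canceled.

```go
package globallock

import (
	"time"

	"github.com/go-redsync/redsync/v4"
)

// autoExtend keeps pushing the expiry of an acquired redsync mutex forward
// until stop is closed or extending fails. On failure it simply returns:
// the lock may then expire, but the caller's operation is not interrupted.
func autoExtend(m *redsync.Mutex, expiry time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(expiry / 2)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return // the lock has been released by the caller
		case <-ticker.C:
			if ok, err := m.Extend(); err != nil || !ok {
				return // e.g. lost connection to redis; give up silently
			}
		}
	}
}
```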
### Multiple ways to use the lock

1. Regular way

```go
release, err := Lock(ctx, key)
if err != nil {
	return err
}
defer release()
// ...
```

2. Early release

```go
release, err := Lock(ctx, key)
if err != nil {
	return err
}
defer release()
// ...
// release the lock earlier
release()
// continue to do something else
// ...
```

3. Functional way (a possible implementation of `LockAndDo` is sketched below)

```go
if err := LockAndDo(ctx, key, func(ctx context.Context) error {
	// ...
	return nil
}); err != nil {
	return err
}
```
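The `LockAndDo` helper could plausibly be a thin wrapper around `Lock`; a minimal sketch under that assumption (the real implementation may differ):

```go
package globallock

import "context"

// LockAndDo acquires the lock for key, runs f while holding it, and releases
// the lock afterwards, whether f succeeds or returns an error.
func LockAndDo(ctx context.Context, key string, f func(context.Context) error) error {
	release, err := Lock(ctx, key) // package-level Lock, assumed to use the default locker
	if err != nil {
		return err
	}
	defer release()

	return f(ctx)
}
```

Written this way, the functional style inherits the release semantics automatically: the lock is released exactly once, no matter how `f` exits.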
181 lines · 4.3 KiB · Go
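The test file below exercises the locker abstraction described above. As a reading aid, here is the interface shape as it can be reconstructed from the calls in this file (`Lock`, `TryLock`, and the release functions); treat it as an approximation, not the authoritative definition:

```go
package globallock

import "context"

// ReleaseFunc releases an acquired lock. It is always safe to call, even if
// the lock was never acquired, and calling it multiple times is a no-op.
type ReleaseFunc func()

// Locker is implemented by both the memory-based and the redis-based lockers.
type Locker interface {
	// Lock blocks until the lock for key is acquired or ctx is done.
	Lock(ctx context.Context, key string) (ReleaseFunc, error)
	// TryLock acquires the lock only if it is immediately available and
	// reports whether it was acquired.
	TryLock(ctx context.Context, key string) (bool, ReleaseFunc, error)
}
```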
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
	"context"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/go-redsync/redsync/v4"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestLocker(t *testing.T) {
	t.Run("redis", func(t *testing.T) {
		url := "redis://127.0.0.1:6379/0"
		if os.Getenv("CI") == "" {
			// Make it possible to run tests against a local redis instance
			url = os.Getenv("TEST_REDIS_URL")
			if url == "" {
				t.Skip("TEST_REDIS_URL not set and not running in CI")
				return
			}
		}
		oldExpiry := redisLockExpiry
		redisLockExpiry = 5 * time.Second // make it shorter for testing
		defer func() {
			redisLockExpiry = oldExpiry
		}()

		locker := NewRedisLocker(url)
		testLocker(t, locker)
		testRedisLocker(t, locker.(*redisLocker))
		require.NoError(t, locker.(*redisLocker).Close())
	})
	t.Run("memory", func(t *testing.T) {
		locker := NewMemoryLocker()
		testLocker(t, locker)
		testMemoryLocker(t, locker.(*memoryLocker))
	})
}

func testLocker(t *testing.T, locker Locker) {
	t.Run("lock", func(t *testing.T) {
		parentCtx := context.Background()
		release, err := locker.Lock(parentCtx, "test")
		defer release()

		assert.NoError(t, err)

		func() {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			release, err := locker.Lock(ctx, "test")
			defer release()

			assert.Error(t, err)
		}()

		release()

		func() {
			release, err := locker.Lock(context.Background(), "test")
			defer release()

			assert.NoError(t, err)
		}()
	})

	t.Run("try lock", func(t *testing.T) {
		parentCtx := context.Background()
		ok, release, err := locker.TryLock(parentCtx, "test")
		defer release()

		assert.True(t, ok)
		assert.NoError(t, err)

		func() {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			ok, release, err := locker.TryLock(ctx, "test")
			defer release()

			assert.False(t, ok)
			assert.NoError(t, err)
		}()

		release()

		func() {
			ok, release, _ := locker.TryLock(context.Background(), "test")
			defer release()

			assert.True(t, ok)
		}()
	})

	t.Run("wait and acquired", func(t *testing.T) {
		ctx := context.Background()
		release, err := locker.Lock(ctx, "test")
		require.NoError(t, err)

		wg := &sync.WaitGroup{}
		wg.Add(1)
		go func() {
			defer wg.Done()
			started := time.Now()
			release, err := locker.Lock(context.Background(), "test") // should be blocked for seconds
			defer release()
			assert.Greater(t, time.Since(started), time.Second)
			assert.NoError(t, err)
		}()

		time.Sleep(2 * time.Second)
		release()

		wg.Wait()
	})

	t.Run("multiple release", func(t *testing.T) {
		ctx := context.Background()

		release1, err := locker.Lock(ctx, "test")
		require.NoError(t, err)

		release1()

		release2, err := locker.Lock(ctx, "test")
		defer release2()
		require.NoError(t, err)

		// Call release1 again,
		// it should not panic or block,
		// and it shouldn't affect the other lock
		release1()

		ok, release3, err := locker.TryLock(ctx, "test")
		defer release3()
		require.NoError(t, err)
		// It should NOT be able to acquire the lock;
		// otherwise, it would mean the lock had been released by release1
		assert.False(t, ok)
	})
}

// testMemoryLocker does specific tests for memoryLocker
func testMemoryLocker(t *testing.T, locker *memoryLocker) {
	// nothing to do
}

// testRedisLocker does specific tests for redisLocker
func testRedisLocker(t *testing.T, locker *redisLocker) {
	defer func() {
		// This case should be tested at the end.
		// Otherwise, it will affect other tests.
		t.Run("close", func(t *testing.T) {
			assert.NoError(t, locker.Close())
			_, err := locker.Lock(context.Background(), "test")
			assert.Error(t, err)
		})
	}()

	t.Run("failed extend", func(t *testing.T) {
		release, err := locker.Lock(context.Background(), "test")
		defer release()
		require.NoError(t, err)

		// Simulate problems with extending, such as network issues or the redis server being down.
		v, ok := locker.mutexM.Load("test")
		require.True(t, ok)
		m := v.(*redsync.Mutex)
		_, _ = m.Unlock() // release it to make it impossible to extend

		// In the current design, callers can't know that the lock can't be extended.
		// Just keep this case to improve the test coverage.
	})
}
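The "multiple release" case above depends on `ReleaseFunc` being idempotent. One way to get that behavior, sketched here purely for illustration (an assumption, not the project's actual code), is to wrap the underlying unlock in `sync.Once`:

```go
package globallock

import "sync"

// newReleaseFunc wraps unlock so that only the first call has any effect;
// later calls are harmless no-ops, which is what the "multiple release"
// test expects.
func newReleaseFunc(unlock func()) ReleaseFunc {
	once := &sync.Once{}
	return func() {
		once.Do(unlock)
	}
}
```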