From de78eab63a99653edf68f783e263688ad4b701d8 Mon Sep 17 00:00:00 2001 From: Brian Meek Date: Fri, 5 Aug 2022 01:19:33 -0700 Subject: [PATCH] Add race testing to tests, and fix a few small race conditions in the tests (#2587) * Add race testing to tests, and fix a few small race conditions in the tests * Enable run-sytest on MacOS * Remove deadlock detecting mutex, per code review feedback * Remove autoformatting related changes and a closure that is not needed * Adjust to importing nats client as 'natsclient' Signed-off-by: Brian Meek * Clarify the use of gooseMutex to protect goose internal state Signed-off-by: Brian Meek * Remove no longer needed mutex for guarding goose Signed-off-by: Brian Meek --- build/scripts/build-test-lint.sh | 2 +- docs/CONTRIBUTING.md | 2 +- federationapi/federationapi_test.go | 10 ++++++++++ keyserver/storage/storage_test.go | 9 ++++++++- run-sytest.sh | 2 +- setup/jetstream/nats.go | 14 +++++++------- syncapi/sync/requestpool_test.go | 7 +++++++ 7 files changed, 35 insertions(+), 11 deletions(-) diff --git a/build/scripts/build-test-lint.sh b/build/scripts/build-test-lint.sh index 8f0b775b1..32f89c076 100755 --- a/build/scripts/build-test-lint.sh +++ b/build/scripts/build-test-lint.sh @@ -13,4 +13,4 @@ go build ./cmd/... ./build/scripts/find-lint.sh echo "Testing..." -go test -v ./... +go test --race -v ./... diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 169224b9e..771af9ecf 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -64,7 +64,7 @@ comment. Please avoid doing this if you can. We also have unit tests which we run via: ```bash -go test ./... +go test --race ./... ``` In general, we like submissions that come with tests. 
Anything that proves that the diff --git a/federationapi/federationapi_test.go b/federationapi/federationapi_test.go index ae244c566..8884e34c6 100644 --- a/federationapi/federationapi_test.go +++ b/federationapi/federationapi_test.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "strings" + "sync" "testing" "time" @@ -48,6 +49,7 @@ func (f *fedRoomserverAPI) QueryRoomsForUser(ctx context.Context, req *rsapi.Que // TODO: This struct isn't generic, only works for TestFederationAPIJoinThenKeyUpdate type fedClient struct { + fedClientMutex sync.Mutex api.FederationClient allowJoins []*test.Room keys map[gomatrixserverlib.ServerName]struct { @@ -59,6 +61,8 @@ type fedClient struct { } func (f *fedClient) GetServerKeys(ctx context.Context, matrixServer gomatrixserverlib.ServerName) (gomatrixserverlib.ServerKeys, error) { + f.fedClientMutex.Lock() + defer f.fedClientMutex.Unlock() fmt.Println("GetServerKeys:", matrixServer) var keys gomatrixserverlib.ServerKeys var keyID gomatrixserverlib.KeyID @@ -122,6 +126,8 @@ func (f *fedClient) MakeJoin(ctx context.Context, s gomatrixserverlib.ServerName return } func (f *fedClient) SendJoin(ctx context.Context, s gomatrixserverlib.ServerName, event *gomatrixserverlib.Event) (res gomatrixserverlib.RespSendJoin, err error) { + f.fedClientMutex.Lock() + defer f.fedClientMutex.Unlock() for _, r := range f.allowJoins { if r.ID == event.RoomID() { r.InsertEvent(f.t, event.Headered(r.Version)) @@ -134,6 +140,8 @@ func (f *fedClient) SendJoin(ctx context.Context, s gomatrixserverlib.ServerName } func (f *fedClient) SendTransaction(ctx context.Context, t gomatrixserverlib.Transaction) (res gomatrixserverlib.RespSend, err error) { + f.fedClientMutex.Lock() + defer f.fedClientMutex.Unlock() for _, edu := range t.EDUs { if edu.Type == gomatrixserverlib.MDeviceListUpdate { f.sentTxn = true @@ -242,6 +250,8 @@ func testFederationAPIJoinThenKeyUpdate(t *testing.T, dbType test.DBType) { testrig.MustPublishMsgs(t, jsctx, msg) time.Sleep(500 * 
time.Millisecond) + fc.fedClientMutex.Lock() + defer fc.fedClientMutex.Unlock() if !fc.sentTxn { t.Fatalf("did not send device list update") } diff --git a/keyserver/storage/storage_test.go b/keyserver/storage/storage_test.go index 44cfb5f2a..e7a2af7c2 100644 --- a/keyserver/storage/storage_test.go +++ b/keyserver/storage/storage_test.go @@ -3,6 +3,7 @@ package storage_test import ( "context" "reflect" + "sync" "testing" "github.com/matrix-org/dendrite/keyserver/api" @@ -103,6 +104,9 @@ func TestKeyChangesUpperLimit(t *testing.T) { }) } +var dbLock sync.Mutex +var deviceArray = []string{"AAA", "another_device"} + // The purpose of this test is to make sure that the storage layer is generating sequential stream IDs per user, // and that they are returned correctly when querying for device keys. func TestDeviceKeysStreamIDGeneration(t *testing.T) { @@ -169,8 +173,11 @@ func TestDeviceKeysStreamIDGeneration(t *testing.T) { t.Fatalf("Expected StoreLocalDeviceKeys to set StreamID=3 (new key same device) but got %d", msgs[0].StreamID) } + dbLock.Lock() + defer dbLock.Unlock() // Querying for device keys returns the latest stream IDs - msgs, err = db.DeviceKeysForUser(ctx, alice, []string{"AAA", "another_device"}, false) + msgs, err = db.DeviceKeysForUser(ctx, alice, deviceArray, false) + if err != nil { t.Fatalf("DeviceKeysForUser returned error: %s", err) } diff --git a/run-sytest.sh b/run-sytest.sh index 47635fd12..e23982397 100755 --- a/run-sytest.sh +++ b/run-sytest.sh @@ -17,7 +17,7 @@ main() { if [ -d ../sytest ]; then local tmpdir - tmpdir="$(mktemp -d --tmpdir run-systest.XXXXXXXXXX)" + tmpdir="$(mktemp -d -t run-systest.XXXXXXXXXX)" trap "rm -r '$tmpdir'" EXIT if [ -z "$DISABLE_BUILDING_SYTEST" ]; then diff --git a/setup/jetstream/nats.go b/setup/jetstream/nats.go index be216a02a..051d55a35 100644 --- a/setup/jetstream/nats.go +++ b/setup/jetstream/nats.go @@ -14,16 +14,16 @@ import ( "github.com/sirupsen/logrus" natsserver 
"github.com/nats-io/nats-server/v2/server" - "github.com/nats-io/nats.go" natsclient "github.com/nats-io/nats.go" ) type NATSInstance struct { *natsserver.Server - sync.Mutex } -func DeleteAllStreams(js nats.JetStreamContext, cfg *config.JetStream) { +var natsLock sync.Mutex + +func DeleteAllStreams(js natsclient.JetStreamContext, cfg *config.JetStream) { for _, stream := range streams { // streams are defined in streams.go name := cfg.Prefixed(stream.Name) _ = js.DeleteStream(name) @@ -31,11 +31,12 @@ func DeleteAllStreams(js nats.JetStreamContext, cfg *config.JetStream) { } func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetStream) (natsclient.JetStreamContext, *natsclient.Conn) { + natsLock.Lock() + defer natsLock.Unlock() // check if we need an in-process NATS Server if len(cfg.Addresses) != 0 { return setupNATS(process, cfg, nil) } - s.Lock() if s.Server == nil { var err error s.Server, err = natsserver.NewServer(&natsserver.Options{ @@ -63,7 +64,6 @@ func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetS process.ComponentFinished() }() } - s.Unlock() if !s.ReadyForConnections(time.Second * 10) { logrus.Fatalln("NATS did not start in time") } @@ -77,9 +77,9 @@ func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetS func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStreamContext, *natsclient.Conn) { if nc == nil { var err error - opts := []nats.Option{} + opts := []natsclient.Option{} if cfg.DisableTLSValidation { - opts = append(opts, nats.Secure(&tls.Config{ + opts = append(opts, natsclient.Secure(&tls.Config{ InsecureSkipVerify: true, })) } diff --git a/syncapi/sync/requestpool_test.go b/syncapi/sync/requestpool_test.go index 48e6c6c7a..3e5769d8c 100644 --- a/syncapi/sync/requestpool_test.go +++ b/syncapi/sync/requestpool_test.go @@ -12,10 +12,13 @@ import ( ) type dummyPublisher struct { + lock sync.Mutex count int } func 
(d *dummyPublisher) SendPresence(userID string, presence types.Presence, statusMsg *string) error { + d.lock.Lock() + defer d.lock.Unlock() d.count++ return nil } @@ -125,11 +128,15 @@ func TestRequestPool_updatePresence(t *testing.T) { go rp.cleanPresence(db, time.Millisecond*50) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + publisher.lock.Lock() beforeCount := publisher.count + publisher.lock.Unlock() rp.updatePresence(db, tt.args.presence, tt.args.userID) + publisher.lock.Lock() if tt.wantIncrease && publisher.count <= beforeCount { t.Fatalf("expected count to increase: %d <= %d", publisher.count, beforeCount) } + publisher.lock.Unlock() time.Sleep(tt.args.sleep) }) }