@@ -16,6 +16,7 @@ package integration
 
 import (
 	"math/rand"
+	"sync"
 	"testing"
 	"time"
 
@@ -28,18 +29,24 @@ import (
 func TestMutexSingleNode(t *testing.T) {
 	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
-	testMutex(t, 5, func() *clientv3.Client { return clus.clients[0] })
+
+	var clients []*clientv3.Client
+	testMutex(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
+	closeClients(t, clients)
 }
 
 func TestMutexMultiNode(t *testing.T) {
 	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
-	testMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
+
+	var clients []*clientv3.Client
+	testMutex(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
+	closeClients(t, clients)
 }
 
 func testMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
 	// stream lock acquisitions
-	lockedC := make(chan *concurrency.Mutex, 1)
+	lockedC := make(chan *concurrency.Mutex)
 	for i := 0; i < waiters; i++ {
 		go func() {
 			m := concurrency.NewMutex(chooseClient(), "test-mutex")
@@ -69,6 +76,22 @@ func testMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client)
 	}
 }
 
+// TestMutexSessionRelock ensures that acquiring the same lock with the same
+// session will not result in deadlock.
+func TestMutexSessionRelock(t *testing.T) {
+	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	defer clus.Terminate(t)
+	cli := clus.RandClient()
+	m := concurrency.NewMutex(cli, "test-mutex")
+	if err := m.Lock(context.TODO()); err != nil {
+		t.Fatal(err)
+	}
+	m2 := concurrency.NewMutex(cli, "test-mutex")
+	if err := m2.Lock(context.TODO()); err != nil {
+		t.Fatal(err)
+	}
+}
+
 func BenchmarkMutex4Waiters(b *testing.B) {
 	// XXX switch tests to use TB interface
 	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
@@ -137,3 +160,38 @@ func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client
 		}
 	}
 }
+
+func makeClients(t *testing.T, clients *[]*clientv3.Client, choose func() *member) func() *clientv3.Client {
+	var mu sync.Mutex
+	*clients = nil
+	return func() *clientv3.Client {
+		cli, err := NewClientV3(choose())
+		if err != nil {
+			t.Fatal(err)
+		}
+		mu.Lock()
+		*clients = append(*clients, cli)
+		mu.Unlock()
+		return cli
+	}
+}
+
+func makeSingleNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
+	return makeClients(t, clients, func() *member {
+		return clus.Members[0]
+	})
+}
+
+func makeMultiNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
+	return makeClients(t, clients, func() *member {
+		return clus.Members[rand.Intn(len(clus.Members))]
+	})
+}
+
+func closeClients(t *testing.T, clients []*clientv3.Client) {
+	for _, cli := range clients {
+		if err := cli.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
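
The rewritten tests above establish the intended usage pattern for the new helpers: build a client factory that records every client it creates, hand that factory to the code under test, then close all recorded clients before the cluster is terminated. A minimal standalone sketch of that pattern follows; the test name, lock key, and comments are illustrative only and are not part of this patch:

func TestMutexClientFactory(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// The factory records every client it creates in 'clients'.
	var clients []*clientv3.Client
	chooseClient := makeMultiNodeClients(t, clus.cluster, &clients)

	// Each call dials a new client against a randomly chosen member.
	m := concurrency.NewMutex(chooseClient(), "example-lock")
	if err := m.Lock(context.TODO()); err != nil {
		t.Fatal(err)
	}

	// Close every client the factory created before the cluster shuts down.
	closeClients(t, clients)
}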