v3_lock_test.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"context"
	"math/rand"
	"sync"
	"testing"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
	recipe "go.etcd.io/etcd/contrib/recipes"
	"go.etcd.io/etcd/mvcc/mvccpb"
	"go.etcd.io/etcd/pkg/testutil"
)

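// TestMutexLockSingleNode verifies mutex lock handoff when every session
// is created against the same cluster member.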
func TestMutexLockSingleNode(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexLock(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
	closeClients(t, clients)
}

func TestMutexLockMultiNode(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexLock(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
	closeClients(t, clients)
}

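// testMutexLock starts 'waiters' goroutines that contend for the same
// mutex, then verifies that the lock is handed off to one holder at a time.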
func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
	// stream lock acquisitions
	lockedC := make(chan *concurrency.Mutex)
	for i := 0; i < waiters; i++ {
		go func() {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				t.Error(err)
			}
			m := concurrency.NewMutex(session, "test-mutex")
			if err := m.Lock(context.TODO()); err != nil {
				t.Errorf("could not wait on lock (%v)", err)
			}
			lockedC <- m
		}()
	}

	// unlock locked mutexes
	timerC := time.After(time.Duration(waiters) * time.Second)
	for i := 0; i < waiters; i++ {
		select {
		case <-timerC:
			t.Fatalf("timed out waiting for lock %d", i)
		case m := <-lockedC:
			// lock acquired with m
			select {
			case <-lockedC:
				t.Fatalf("lock %d followers did not wait", i)
			default:
			}
			if err := m.Unlock(context.TODO()); err != nil {
				t.Fatalf("could not release lock (%v)", err)
			}
		}
	}
}

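// TestMutexTryLockSingleNode verifies TryLock contention when every session
// is created against the same cluster member.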
func TestMutexTryLockSingleNode(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexTryLock(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
	closeClients(t, clients)
}

func TestMutexTryLockMultiNode(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexTryLock(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
	closeClients(t, clients)
}

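// testMutexTryLock starts 'lockers' goroutines that call TryLock on the same
// key and checks that exactly one succeeds while the rest observe
// concurrency.ErrLocked.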
func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
	lockedC := make(chan *concurrency.Mutex)
	notlockedC := make(chan *concurrency.Mutex)
	for i := 0; i < lockers; i++ {
		go func() {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				t.Error(err)
			}
			m := concurrency.NewMutex(session, "test-mutex-try-lock")
			err = m.TryLock(context.TODO())
			if err == nil {
				lockedC <- m
			} else if err == concurrency.ErrLocked {
				notlockedC <- m
			} else {
				t.Errorf("unexpected error (%v)", err)
			}
		}()
	}

	timerC := time.After(time.Second)
	select {
	case <-lockedC:
		for i := 0; i < lockers-1; i++ {
			select {
			case <-lockedC:
				t.Fatalf("multiple mutexes locked on same key")
			case <-notlockedC:
			case <-timerC:
				t.Errorf("timed out waiting for lock")
			}
		}
	case <-timerC:
		t.Errorf("timed out waiting for lock")
	}
}

// TestMutexSessionRelock ensures that acquiring the same lock with the same
// session will not result in deadlock.
func TestMutexSessionRelock(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	session, err := concurrency.NewSession(clus.RandClient())
	if err != nil {
		t.Error(err)
	}

	m := concurrency.NewMutex(session, "test-mutex")
	if err := m.Lock(context.TODO()); err != nil {
		t.Fatal(err)
	}

	m2 := concurrency.NewMutex(session, "test-mutex")
	if err := m2.Lock(context.TODO()); err != nil {
		t.Fatal(err)
	}
}

// TestMutexWaitsOnCurrentHolder ensures a mutex is only acquired once all
// waiters older than the new owner are gone by testing the case where
// the waiter prior to the acquirer expires before the current holder.
func TestMutexWaitsOnCurrentHolder(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cctx := context.Background()
	cli := clus.Client(0)

	firstOwnerSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Error(err)
	}
	defer firstOwnerSession.Close()
	firstOwnerMutex := concurrency.NewMutex(firstOwnerSession, "test-mutex")
	if err = firstOwnerMutex.Lock(cctx); err != nil {
		t.Fatal(err)
	}

	victimSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Error(err)
	}
	defer victimSession.Close()
	victimDonec := make(chan struct{})
	go func() {
		defer close(victimDonec)
		concurrency.NewMutex(victimSession, "test-mutex").Lock(cctx)
	}()

	// ensure the mutexes associated with firstOwnerSession and victimSession
	// are waiting before the new owner is created
	wch := cli.Watch(cctx, "test-mutex", clientv3.WithPrefix(), clientv3.WithRev(1))
	putCounts := 0
	for putCounts < 2 {
		select {
		case wrp := <-wch:
			putCounts += len(wrp.Events)
		case <-time.After(time.Second):
			t.Fatal("failed to receive watch response")
		}
	}
	if putCounts != 2 {
		t.Fatalf("expected 2 put events, but got %v", putCounts)
	}

	newOwnerSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Error(err)
	}
	defer newOwnerSession.Close()
	newOwnerDonec := make(chan struct{})
	go func() {
		defer close(newOwnerDonec)
		concurrency.NewMutex(newOwnerSession, "test-mutex").Lock(cctx)
	}()

	select {
	case wrp := <-wch:
		if len(wrp.Events) != 1 {
			t.Fatalf("expected one event, but got %v events", len(wrp.Events))
		}
		if e := wrp.Events[0]; e.Type != mvccpb.PUT {
			t.Fatalf("expected a put event on prefix test-mutex, but got event type %v", e.Type)
		}
	case <-time.After(time.Second):
		t.Fatalf("failed to receive a watch response")
	}

	// simulate losing the client that's next in line to acquire the lock
	victimSession.Close()

	// ensure the victim waiter is deleted on the server side
	select {
	case wrp := <-wch:
		if len(wrp.Events) != 1 {
			t.Fatalf("expected one event, but got %v events", len(wrp.Events))
		}
		if e := wrp.Events[0]; e.Type != mvccpb.DELETE {
			t.Fatalf("expected a delete event on prefix test-mutex, but got event type %v", e.Type)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive a watch response")
	}

	select {
	case <-newOwnerDonec:
		t.Fatal("new owner obtained lock before first owner unlocked")
	default:
	}

	if err := firstOwnerMutex.Unlock(cctx); err != nil {
		t.Fatal(err)
	}

	select {
	case <-newOwnerDonec:
	case <-time.After(time.Second):
		t.Fatal("new owner failed to obtain lock")
	}

	select {
	case <-victimDonec:
	case <-time.After(time.Second):
		t.Fatal("victim mutex failed to exit after first owner releases lock")
	}
}

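// BenchmarkMutex4Waiters measures mutex lock handoff with four contending
// waiters per iteration.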
func BenchmarkMutex4Waiters(b *testing.B) {
	// XXX switch tests to use TB interface
	clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
	defer clus.Terminate(nil)
	for i := 0; i < b.N; i++ {
		testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })
	}
}

func TestRWMutexSingleNode(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	testRWMutex(t, 5, func() *clientv3.Client { return clus.clients[0] })
}

func TestRWMutexMultiNode(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
}

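// testRWMutex starts 'waiters' goroutines that randomly take either the read
// or the write side of an RWMutex, then verifies that writers exclude readers
// and vice versa as each lock is released.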
func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
	// stream rwlock acquisitions
	rlockedC := make(chan *recipe.RWMutex, 1)
	wlockedC := make(chan *recipe.RWMutex, 1)
	for i := 0; i < waiters; i++ {
		go func() {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				t.Error(err)
			}
			rwm := recipe.NewRWMutex(session, "test-rwmutex")
			if rand.Intn(2) == 0 {
				if err := rwm.RLock(); err != nil {
					t.Errorf("could not rlock (%v)", err)
				}
				rlockedC <- rwm
			} else {
				if err := rwm.Lock(); err != nil {
					t.Errorf("could not lock (%v)", err)
				}
				wlockedC <- rwm
			}
		}()
	}

	// unlock locked rwmutexes
	timerC := time.After(time.Duration(waiters) * time.Second)
	for i := 0; i < waiters; i++ {
		select {
		case <-timerC:
			t.Fatalf("timed out waiting for lock %d", i)
		case wl := <-wlockedC:
			select {
			case <-rlockedC:
				t.Fatalf("rlock %d readers did not wait", i)
			default:
			}
			if err := wl.Unlock(); err != nil {
				t.Fatalf("could not release lock (%v)", err)
			}
		case rl := <-rlockedC:
			select {
			case <-wlockedC:
				t.Fatalf("rlock %d writers did not wait", i)
			default:
			}
			if err := rl.RUnlock(); err != nil {
				t.Fatalf("could not release rlock (%v)", err)
			}
		}
	}
}

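// makeClients returns a factory that creates a new client against the member
// picked by 'choose' and records it in 'clients' for later cleanup.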
func makeClients(t *testing.T, clients *[]*clientv3.Client, choose func() *member) func() *clientv3.Client {
	var mu sync.Mutex
	*clients = nil
	return func() *clientv3.Client {
		cli, err := NewClientV3(choose())
		if err != nil {
			t.Fatalf("cannot create client: %v", err)
		}
		mu.Lock()
		*clients = append(*clients, cli)
		mu.Unlock()
		return cli
	}
}

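// makeSingleNodeClients returns a client factory pinned to the first
// cluster member.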
func makeSingleNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
	return makeClients(t, clients, func() *member {
		return clus.Members[0]
	})
}

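// makeMultiNodeClients returns a client factory that picks a random cluster
// member for each new client.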
func makeMultiNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
	return makeClients(t, clients, func() *member {
		return clus.Members[rand.Intn(len(clus.Members))]
	})
}

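// closeClients closes every client created by a makeClients factory.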
func closeClients(t *testing.T, clients []*clientv3.Client) {
	for _, cli := range clients {
		if err := cli.Close(); err != nil {
			t.Fatal(err)
		}
	}
}