  1. // Copyright 2016 CoreOS, Inc.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
// limitations under the License.
  14. package integration
  15. import (
  16. "bytes"
  17. "fmt"
  18. "math/rand"
  19. "reflect"
  20. "sort"
  21. "sync"
  22. "testing"
  23. "time"
  24. "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
  25. "github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/grpc"
  26. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  27. "github.com/coreos/etcd/lease"
  28. "github.com/coreos/etcd/storage/storagepb"
  29. )
// clusterV3 bundles a test cluster with one gRPC client connection
// per cluster member.
type clusterV3 struct {
	*cluster
	// conns holds a client connection for each member, in member order.
	conns []*grpc.ClientConn
}
  34. // newClusterGRPC returns a launched cluster with a grpc client connection
  35. // for each cluster member.
  36. func newClusterGRPC(t *testing.T, cfg *clusterConfig) *clusterV3 {
  37. cfg.useV3 = true
  38. cfg.useGRPC = true
  39. clus := &clusterV3{cluster: NewClusterByConfig(t, cfg)}
  40. for _, m := range clus.Members {
  41. conn, err := NewGRPCClient(m)
  42. if err != nil {
  43. t.Fatal(err)
  44. }
  45. clus.conns = append(clus.conns, conn)
  46. }
  47. clus.Launch(t)
  48. return clus
  49. }
  50. func (c *clusterV3) Terminate(t *testing.T) {
  51. for _, conn := range c.conns {
  52. if err := conn.Close(); err != nil {
  53. t.Error(err)
  54. }
  55. }
  56. c.cluster.Terminate(t)
  57. }
  58. func (c *clusterV3) RandConn() *grpc.ClientConn {
  59. return c.conns[rand.Intn(len(c.conns))]
  60. }
// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)
	kvc := pb.NewKVClient(clus.RandConn())
	key := []byte("foo")
	reqput := &pb.PutRequest{Key: key, Value: []byte("bar")}
	respput, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// overwrite
	reqput.Value = []byte("baz")
	respput2, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// the overwrite must land at a strictly newer revision than the first put
	if respput2.Header.Revision <= respput.Header.Revision {
		t.Fatalf("expected newer revision on overwrite, got %v <= %v",
			respput2.Header.Revision, respput.Header.Revision)
	}
	reqrange := &pb.RangeRequest{Key: key}
	resprange, err := kvc.Range(context.TODO(), reqrange)
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(resprange.Kvs) != 1 {
		t.Fatalf("expected 1 key, got %v", len(resprange.Kvs))
	}
	kv := resprange.Kvs[0]
	// an overwritten key keeps its create revision but gets a newer mod revision
	if kv.ModRevision <= kv.CreateRevision {
		t.Errorf("expected modRev > createRev, got %d <= %d",
			kv.ModRevision, kv.CreateRevision)
	}
	// the stored value must be the one written by the overwrite ("baz")
	if !reflect.DeepEqual(reqput.Value, kv.Value) {
		t.Errorf("expected value %v, got %v", reqput.Value, kv.Value)
	}
}
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)
	kvc := pb.NewKVClient(clus.RandConn())
	key := []byte("foo")
	// 123456 is a lease id that was never granted, so every put using it must fail
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// ignore bad lease in failure on success txn
		func() {
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestRange: rreq})
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}
	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
  159. // TestV3DeleteRange tests various edge cases in the DeleteRange API.
  160. func TestV3DeleteRange(t *testing.T) {
  161. tests := []struct {
  162. keySet []string
  163. begin string
  164. end string
  165. wantSet [][]byte
  166. }{
  167. // delete middle
  168. {
  169. []string{"foo", "foo/abc", "fop"},
  170. "foo/", "fop",
  171. [][]byte{[]byte("foo"), []byte("fop")},
  172. },
  173. // no delete
  174. {
  175. []string{"foo", "foo/abc", "fop"},
  176. "foo/", "foo/",
  177. [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
  178. },
  179. // delete first
  180. {
  181. []string{"foo", "foo/abc", "fop"},
  182. "fo", "fop",
  183. [][]byte{[]byte("fop")},
  184. },
  185. // delete tail
  186. {
  187. []string{"foo", "foo/abc", "fop"},
  188. "foo/", "fos",
  189. [][]byte{[]byte("foo")},
  190. },
  191. // delete exact
  192. {
  193. []string{"foo", "foo/abc", "fop"},
  194. "foo/abc", "",
  195. [][]byte{[]byte("foo"), []byte("fop")},
  196. },
  197. // delete none, [x,x)
  198. {
  199. []string{"foo"},
  200. "foo", "foo",
  201. [][]byte{[]byte("foo")},
  202. },
  203. }
  204. for i, tt := range tests {
  205. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  206. kvc := pb.NewKVClient(clus.RandConn())
  207. ks := tt.keySet
  208. for j := range ks {
  209. reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
  210. _, err := kvc.Put(context.TODO(), reqput)
  211. if err != nil {
  212. t.Fatalf("couldn't put key (%v)", err)
  213. }
  214. }
  215. dreq := &pb.DeleteRangeRequest{
  216. Key: []byte(tt.begin),
  217. RangeEnd: []byte(tt.end)}
  218. dresp, err := kvc.DeleteRange(context.TODO(), dreq)
  219. if err != nil {
  220. t.Fatalf("couldn't delete range on test %d (%v)", i, err)
  221. }
  222. rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
  223. rresp, err := kvc.Range(context.TODO(), rreq)
  224. if err != nil {
  225. t.Errorf("couldn't get range on test %v (%v)", i, err)
  226. }
  227. if dresp.Header.Revision != rresp.Header.Revision {
  228. t.Errorf("expected revision %v, got %v",
  229. dresp.Header.Revision, rresp.Header.Revision)
  230. }
  231. keys := [][]byte{}
  232. for j := range rresp.Kvs {
  233. keys = append(keys, rresp.Kvs[j].Key)
  234. }
  235. if reflect.DeepEqual(tt.wantSet, keys) == false {
  236. t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
  237. }
  238. // can't defer because tcp ports will be in use
  239. clus.Terminate(t)
  240. }
  241. }
  242. // TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
  243. func TestV3WatchFromCurrentRevision(t *testing.T) {
  244. tests := []struct {
  245. putKeys []string
  246. watchRequest *pb.WatchRequest
  247. wresps []*pb.WatchResponse
  248. }{
  249. // watch the key, matching
  250. {
  251. []string{"foo"},
  252. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
  253. []*pb.WatchResponse{
  254. {
  255. Header: &pb.ResponseHeader{Revision: 1},
  256. Created: true,
  257. },
  258. {
  259. Header: &pb.ResponseHeader{Revision: 2},
  260. Created: false,
  261. Events: []*storagepb.Event{
  262. {
  263. Type: storagepb.PUT,
  264. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  265. },
  266. },
  267. },
  268. },
  269. },
  270. // watch the key, non-matching
  271. {
  272. []string{"foo"},
  273. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("helloworld")}},
  274. []*pb.WatchResponse{
  275. {
  276. Header: &pb.ResponseHeader{Revision: 1},
  277. Created: true,
  278. },
  279. },
  280. },
  281. // watch the prefix, matching
  282. {
  283. []string{"fooLong"},
  284. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
  285. []*pb.WatchResponse{
  286. {
  287. Header: &pb.ResponseHeader{Revision: 1},
  288. Created: true,
  289. },
  290. {
  291. Header: &pb.ResponseHeader{Revision: 2},
  292. Created: false,
  293. Events: []*storagepb.Event{
  294. {
  295. Type: storagepb.PUT,
  296. Kv: &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  297. },
  298. },
  299. },
  300. },
  301. },
  302. // watch the prefix, non-matching
  303. {
  304. []string{"foo"},
  305. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("helloworld")}},
  306. []*pb.WatchResponse{
  307. {
  308. Header: &pb.ResponseHeader{Revision: 1},
  309. Created: true,
  310. },
  311. },
  312. },
  313. // multiple puts, one watcher with matching key
  314. {
  315. []string{"foo", "foo", "foo"},
  316. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
  317. []*pb.WatchResponse{
  318. {
  319. Header: &pb.ResponseHeader{Revision: 1},
  320. Created: true,
  321. },
  322. {
  323. Header: &pb.ResponseHeader{Revision: 2},
  324. Created: false,
  325. Events: []*storagepb.Event{
  326. {
  327. Type: storagepb.PUT,
  328. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  329. },
  330. },
  331. },
  332. {
  333. Header: &pb.ResponseHeader{Revision: 3},
  334. Created: false,
  335. Events: []*storagepb.Event{
  336. {
  337. Type: storagepb.PUT,
  338. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
  339. },
  340. },
  341. },
  342. {
  343. Header: &pb.ResponseHeader{Revision: 4},
  344. Created: false,
  345. Events: []*storagepb.Event{
  346. {
  347. Type: storagepb.PUT,
  348. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
  349. },
  350. },
  351. },
  352. },
  353. },
  354. // multiple puts, one watcher with matching prefix
  355. {
  356. []string{"foo", "foo", "foo"},
  357. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
  358. []*pb.WatchResponse{
  359. {
  360. Header: &pb.ResponseHeader{Revision: 1},
  361. Created: true,
  362. },
  363. {
  364. Header: &pb.ResponseHeader{Revision: 2},
  365. Created: false,
  366. Events: []*storagepb.Event{
  367. {
  368. Type: storagepb.PUT,
  369. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  370. },
  371. },
  372. },
  373. {
  374. Header: &pb.ResponseHeader{Revision: 3},
  375. Created: false,
  376. Events: []*storagepb.Event{
  377. {
  378. Type: storagepb.PUT,
  379. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
  380. },
  381. },
  382. },
  383. {
  384. Header: &pb.ResponseHeader{Revision: 4},
  385. Created: false,
  386. Events: []*storagepb.Event{
  387. {
  388. Type: storagepb.PUT,
  389. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
  390. },
  391. },
  392. },
  393. },
  394. },
  395. }
  396. for i, tt := range tests {
  397. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  398. wAPI := pb.NewWatchClient(clus.RandConn())
  399. wStream, err := wAPI.Watch(context.TODO())
  400. if err != nil {
  401. t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
  402. }
  403. if err := wStream.Send(tt.watchRequest); err != nil {
  404. t.Fatalf("#%d: wStream.Send error: %v", i, err)
  405. }
  406. go func() {
  407. for _, k := range tt.putKeys {
  408. kvc := pb.NewKVClient(clus.RandConn())
  409. req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
  410. if _, err := kvc.Put(context.TODO(), req); err != nil {
  411. t.Fatalf("#%d: couldn't put key (%v)", i, err)
  412. }
  413. }
  414. }()
  415. var createdWatchId int64
  416. for j, wresp := range tt.wresps {
  417. resp, err := wStream.Recv()
  418. if err != nil {
  419. t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
  420. }
  421. if resp.Header == nil {
  422. t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
  423. }
  424. if resp.Header.Revision != wresp.Header.Revision {
  425. t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
  426. }
  427. if wresp.Created != resp.Created {
  428. t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
  429. }
  430. if resp.Created {
  431. createdWatchId = resp.WatchId
  432. }
  433. if resp.WatchId != createdWatchId {
  434. t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
  435. }
  436. if !reflect.DeepEqual(resp.Events, wresp.Events) {
  437. t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
  438. }
  439. }
  440. rok, nr := WaitResponse(wStream, 1*time.Second)
  441. if !rok {
  442. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  443. }
  444. // can't defer because tcp ports will be in use
  445. clus.Terminate(t)
  446. }
  447. }
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
func TestV3WatchCancelSynced(t *testing.T) {
	// startRev 0 watches from the current revision (synced watcher path)
	testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
func TestV3WatchCancelUnsynced(t *testing.T) {
	// startRev 1 forces the watcher onto the unsynced (catch-up) path
	testV3WatchCancel(t, 1)
}
// testV3WatchCancel creates a watcher, cancels it by its watch id, and
// then verifies that a subsequent put produces no response on the stream.
// startRev selects the synced (0) or unsynced (nonzero) watcher path.
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
	// cancel using the id reported by the creation acknowledgement
	if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}
	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}
	// watch got canceled, so this should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleWatchersSynced runs the multi-watcher test watching
// from the current revision (startRev 0, synced path).
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
	testV3WatchMultipleWatchers(t, 0)
}
// TestV3WatchMultipleWatchersUnsynced runs the multi-watcher test watching
// from revision 1 (unsynced/catch-up path).
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
	testV3WatchMultipleWatchers(t, 1)
}
// testV3WatchMultipleWatchers tests multiple watchers on the same key
// and one watcher with matching prefix. It first puts the key
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())
	kvc := pb.NewKVClient(clus.RandConn())
	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	watchKeyN := 4
	// register watchKeyN watchers on the exact key "foo" plus one
	// watcher on the prefix "fo"
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}
		} else {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("fo"), StartRevision: startRev}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}
	// collect watch ids from the creation acknowledgements
	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// "foo" matches every watcher, so each registered id must report exactly once
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}
	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}
	// now Recv should block because there is no more events coming
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleEventsTxnSynced runs the multi-event txn watch test
// from the current revision (startRev 0, synced path).
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
	testV3WatchMultipleEventsTxn(t, 0)
}
// TestV3WatchMultipleEventsTxnUnsynced runs the multi-event txn watch test
// from revision 1 (unsynced/catch-up path).
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
	testV3WatchMultipleEventsTxn(t, 1)
}
// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, wErr := wAPI.Watch(context.TODO())
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: startRev}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	// commit three puts in a single txn so all events share one revision
	kvc := pb.NewKVClient(clus.RandConn())
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.RequestPut = &pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}
		txn.Success = append(txn.Success, ru)
	}
	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}
	// gather events, skipping the watcher-creation acknowledgement
	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	// events may arrive split across responses; sort by key before comparing
	sort.Sort(eventsSortByKey(events))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}
  648. type eventsSortByKey []*storagepb.Event
  649. func (evs eventsSortByKey) Len() int { return len(evs) }
  650. func (evs eventsSortByKey) Swap(i, j int) { evs[i], evs[j] = evs[j], evs[i] }
  651. func (evs eventsSortByKey) Less(i, j int) bool { return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 }
// TestV3WatchMultipleEventsPutUnsynced writes two keys, starts a watcher
// from revision 1 (the unsynced/catch-up path), writes both keys again,
// and expects the watcher to deliver all four put events in order.
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)
	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, wErr := wAPI.Watch(context.TODO())
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	// StartRevision 1 is behind the two puts above, so the watcher starts unsynced
	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: 1}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	allWevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
		},
	}
	events := []*storagepb.Event{}
	for len(events) < 4 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
		// if PUT requests are committed by now, first receive would return
		// multiple events, but if not, it returns a single event. In SSD,
		// it should return 4 events at once.
	}
	if !reflect.DeepEqual(events, allWevents) {
		t.Errorf("events got = %+v, want = %+v", events, allWevents)
	}
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
// TestV3WatchMultipleStreamsSynced runs the multi-stream watch test from
// the current revision (startRev 0, synced path).
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
	testV3WatchMultipleStreams(t, 0)
}
// TestV3WatchMultipleStreamsUnsynced runs the multi-stream watch test from
// revision 1 (unsynced/catch-up path).
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
	testV3WatchMultipleStreams(t, 1)
}
  722. // testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
  723. func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
  724. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  725. wAPI := pb.NewWatchClient(clus.RandConn())
  726. kvc := pb.NewKVClient(clus.RandConn())
  727. streams := make([]pb.Watch_WatchClient, 5)
  728. for i := range streams {
  729. wStream, errW := wAPI.Watch(context.TODO())
  730. if errW != nil {
  731. t.Fatalf("wAPI.Watch error: %v", errW)
  732. }
  733. if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
  734. t.Fatalf("wStream.Send error: %v", err)
  735. }
  736. streams[i] = wStream
  737. }
  738. for _, wStream := range streams {
  739. wresp, err := wStream.Recv()
  740. if err != nil {
  741. t.Fatalf("wStream.Recv error: %v", err)
  742. }
  743. if !wresp.Created {
  744. t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
  745. }
  746. }
  747. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
  748. t.Fatalf("couldn't put key (%v)", err)
  749. }
  750. var wg sync.WaitGroup
  751. wg.Add(len(streams))
  752. wevents := []*storagepb.Event{
  753. {
  754. Type: storagepb.PUT,
  755. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  756. },
  757. }
  758. for i := range streams {
  759. go func(i int) {
  760. defer wg.Done()
  761. wStream := streams[i]
  762. wresp, err := wStream.Recv()
  763. if err != nil {
  764. t.Fatalf("wStream.Recv error: %v", err)
  765. }
  766. if wresp.WatchId != 0 {
  767. t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
  768. }
  769. if !reflect.DeepEqual(wresp.Events, wevents) {
  770. t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
  771. }
  772. // now Recv should block because there is no more events coming
  773. rok, nr := WaitResponse(wStream, 1*time.Second)
  774. if !rok {
  775. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  776. }
  777. }(i)
  778. }
  779. wg.Wait()
  780. clus.Terminate(t)
  781. }
  782. // WaitResponse waits on the given stream for given duration.
  783. // If there is no more events, true and a nil response will be
  784. // returned closing the WatchClient stream. Or the response will
  785. // be returned.
  786. func WaitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
  787. rCh := make(chan *pb.WatchResponse)
  788. go func() {
  789. resp, _ := wc.Recv()
  790. rCh <- resp
  791. }()
  792. select {
  793. case nr := <-rCh:
  794. return false, nr
  795. case <-time.After(timeout):
  796. }
  797. wc.CloseSend()
  798. rv, ok := <-rCh
  799. if rv != nil || !ok {
  800. return false, rv
  801. }
  802. return true, nil
  803. }
// TestV3RangeRequest exercises the v3 KV Range RPC against a fresh cluster
// for several request shapes: single-key gets, key-range scans,
// revision-pinned reads, limited reads (checking the More flag), and
// sorted+limited reads.
func TestV3RangeRequest(t *testing.T) {
	tests := []struct {
		putKeys []string          // keys to Put (all with value "bar") before issuing requests
		reqs    []pb.RangeRequest // range requests to issue, in order
		wresps  [][]string        // expected keys returned by each request
		wmores  []bool            // expected More flag for each request
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},
			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{},
				{},
			},
			[]bool{false, false, false, false, false},
		},
		// revision
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// Revision 0 means "latest"; revision 1 is the empty store;
				// each Put bumps the store revision by one.
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},
			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				// [z, z) is empty regardless of sort options
				{
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},
			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
			},
			[]bool{true, true, true, true, false},
		},
	}

	// Each case gets a fresh 3-node cluster so revisions start from scratch.
	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})
		for _, k := range tt.putKeys {
			kvc := pb.NewKVClient(clus.RandConn())
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}

		for j, req := range tt.reqs {
			kvc := pb.NewKVClient(clus.RandConn())
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			// Check returned keys match the expectation, in order.
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
			}
			// Header revision: explicit request revision, or the latest
			// revision (one Put per key, plus the initial revision 1).
			wrev := req.Revision
			if wrev == 0 {
				wrev = int64(len(tt.putKeys) + 1)
			}
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}

		clus.Terminate(t)
	}
}
  967. // TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
  968. func TestV3LeaseRevoke(t *testing.T) {
  969. testLeaseRemoveLeasedKey(t, func(clus *clusterV3, leaseID int64) error {
  970. lc := pb.NewLeaseClient(clus.RandConn())
  971. _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
  972. return err
  973. })
  974. }
  975. // TestV3LeaseCreateById ensures leases may be created by a given id.
  976. func TestV3LeaseCreateByID(t *testing.T) {
  977. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  978. defer clus.Terminate(t)
  979. // create fixed lease
  980. lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  981. context.TODO(),
  982. &pb.LeaseCreateRequest{ID: 1, TTL: 1})
  983. if err != nil {
  984. t.Errorf("could not create lease 1 (%v)", err)
  985. }
  986. if lresp.ID != 1 {
  987. t.Errorf("got id %v, wanted id %v", lresp.ID)
  988. }
  989. // create duplicate fixed lease
  990. lresp, err = pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  991. context.TODO(),
  992. &pb.LeaseCreateRequest{ID: 1, TTL: 1})
  993. if err != nil {
  994. t.Error(err)
  995. }
  996. if lresp.ID != 0 || lresp.Error != lease.ErrLeaseExists.Error() {
  997. t.Errorf("got id %v, wanted id 0 (%v)", lresp.ID, lresp.Error)
  998. }
  999. // create fresh fixed lease
  1000. lresp, err = pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  1001. context.TODO(),
  1002. &pb.LeaseCreateRequest{ID: 2, TTL: 1})
  1003. if err != nil {
  1004. t.Errorf("could not create lease 2 (%v)", err)
  1005. }
  1006. if lresp.ID != 2 {
  1007. t.Errorf("got id %v, wanted id %v", lresp.ID)
  1008. }
  1009. }
  1010. // TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
  1011. func TestV3LeaseKeepAlive(t *testing.T) {
  1012. testLeaseRemoveLeasedKey(t, func(clus *clusterV3, leaseID int64) error {
  1013. lc := pb.NewLeaseClient(clus.RandConn())
  1014. lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
  1015. lac, err := lc.LeaseKeepAlive(context.TODO())
  1016. if err != nil {
  1017. return err
  1018. }
  1019. defer lac.CloseSend()
  1020. // renew long enough so lease would've expired otherwise
  1021. for i := 0; i < 3; i++ {
  1022. if err = lac.Send(lreq); err != nil {
  1023. return err
  1024. }
  1025. lresp, rxerr := lac.Recv()
  1026. if rxerr != nil {
  1027. return rxerr
  1028. }
  1029. if lresp.ID != leaseID {
  1030. return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
  1031. }
  1032. time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
  1033. }
  1034. _, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
  1035. return err
  1036. })
  1037. }
  1038. // TestV3LeaseExists creates a lease on a random client, then sends a keepalive on another
  1039. // client to confirm it's visible to the whole cluster.
  1040. func TestV3LeaseExists(t *testing.T) {
  1041. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  1042. defer clus.Terminate(t)
  1043. // create lease
  1044. lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  1045. context.TODO(),
  1046. &pb.LeaseCreateRequest{TTL: 30})
  1047. if err != nil {
  1048. t.Fatal(err)
  1049. }
  1050. if lresp.Error != "" {
  1051. t.Fatal(lresp.Error)
  1052. }
  1053. // confirm keepalive
  1054. lac, err := pb.NewLeaseClient(clus.RandConn()).LeaseKeepAlive(context.TODO())
  1055. if err != nil {
  1056. t.Fatal(err)
  1057. }
  1058. defer lac.CloseSend()
  1059. if err = lac.Send(&pb.LeaseKeepAliveRequest{ID: lresp.ID}); err != nil {
  1060. t.Fatal(err)
  1061. }
  1062. if _, err = lac.Recv(); err != nil {
  1063. t.Fatal(err)
  1064. }
  1065. }
  1066. // acquireLeaseAndKey creates a new lease and creates an attached key.
  1067. func acquireLeaseAndKey(clus *clusterV3, key string) (int64, error) {
  1068. // create lease
  1069. lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  1070. context.TODO(),
  1071. &pb.LeaseCreateRequest{TTL: 1})
  1072. if err != nil {
  1073. return 0, err
  1074. }
  1075. if lresp.Error != "" {
  1076. return 0, fmt.Errorf(lresp.Error)
  1077. }
  1078. // attach to key
  1079. put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID}
  1080. if _, err := pb.NewKVClient(clus.RandConn()).Put(context.TODO(), put); err != nil {
  1081. return 0, err
  1082. }
  1083. return lresp.ID, nil
  1084. }
  1085. // testLeaseRemoveLeasedKey performs some action while holding a lease with an
  1086. // attached key "foo", then confirms the key is gone.
  1087. func testLeaseRemoveLeasedKey(t *testing.T, act func(*clusterV3, int64) error) {
  1088. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  1089. defer clus.Terminate(t)
  1090. leaseID, err := acquireLeaseAndKey(clus, "foo")
  1091. if err != nil {
  1092. t.Fatal(err)
  1093. }
  1094. if err = act(clus, leaseID); err != nil {
  1095. t.Fatal(err)
  1096. }
  1097. // confirm no key
  1098. rreq := &pb.RangeRequest{Key: []byte("foo")}
  1099. rresp, err := pb.NewKVClient(clus.RandConn()).Range(context.TODO(), rreq)
  1100. if err != nil {
  1101. t.Fatal(err)
  1102. }
  1103. if len(rresp.Kvs) != 0 {
  1104. t.Fatalf("lease removed but key remains")
  1105. }
  1106. }