v3_grpc_test.go 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165
  1. // Copyright 2016 CoreOS, Inc.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
// limitations under the License.
  14. package integration
  15. import (
  16. "bytes"
  17. "fmt"
  18. "math/rand"
  19. "reflect"
  20. "sort"
  21. "sync"
  22. "testing"
  23. "time"
  24. "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
  25. "github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/grpc"
  26. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  27. "github.com/coreos/etcd/lease"
  28. "github.com/coreos/etcd/storage/storagepb"
  29. )
// clusterV3 wraps a test cluster with one grpc client connection
// per cluster member, so tests can talk to any member over gRPC.
type clusterV3 struct {
	*cluster
	// conns holds one client connection per member, in Members order.
	conns []*grpc.ClientConn
}
  34. // newClusterGRPC returns a launched cluster with a grpc client connection
  35. // for each cluster member.
  36. func newClusterGRPC(t *testing.T, cfg *clusterConfig) *clusterV3 {
  37. cfg.useV3 = true
  38. cfg.useGRPC = true
  39. clus := &clusterV3{cluster: NewClusterByConfig(t, cfg)}
  40. for _, m := range clus.Members {
  41. conn, err := NewGRPCClient(m)
  42. if err != nil {
  43. t.Fatal(err)
  44. }
  45. clus.conns = append(clus.conns, conn)
  46. }
  47. clus.Launch(t)
  48. return clus
  49. }
  50. func (c *clusterV3) Terminate(t *testing.T) {
  51. for _, conn := range c.conns {
  52. if err := conn.Close(); err != nil {
  53. t.Error(err)
  54. }
  55. }
  56. c.cluster.Terminate(t)
  57. }
  58. func (c *clusterV3) RandConn() *grpc.ClientConn {
  59. return c.conns[rand.Intn(len(c.conns))]
  60. }
  61. // TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
  62. // overwrites it, then checks that the change was applied.
  63. func TestV3PutOverwrite(t *testing.T) {
  64. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  65. defer clus.Terminate(t)
  66. kvc := pb.NewKVClient(clus.RandConn())
  67. key := []byte("foo")
  68. reqput := &pb.PutRequest{Key: key, Value: []byte("bar")}
  69. respput, err := kvc.Put(context.TODO(), reqput)
  70. if err != nil {
  71. t.Fatalf("couldn't put key (%v)", err)
  72. }
  73. // overwrite
  74. reqput.Value = []byte("baz")
  75. respput2, err := kvc.Put(context.TODO(), reqput)
  76. if err != nil {
  77. t.Fatalf("couldn't put key (%v)", err)
  78. }
  79. if respput2.Header.Revision <= respput.Header.Revision {
  80. t.Fatalf("expected newer revision on overwrite, got %v <= %v",
  81. respput2.Header.Revision, respput.Header.Revision)
  82. }
  83. reqrange := &pb.RangeRequest{Key: key}
  84. resprange, err := kvc.Range(context.TODO(), reqrange)
  85. if err != nil {
  86. t.Fatalf("couldn't get key (%v)", err)
  87. }
  88. if len(resprange.Kvs) != 1 {
  89. t.Fatalf("expected 1 key, got %v", len(resprange.Kvs))
  90. }
  91. kv := resprange.Kvs[0]
  92. if kv.ModRevision <= kv.CreateRevision {
  93. t.Errorf("expected modRev > createRev, got %d <= %d",
  94. kv.ModRevision, kv.CreateRevision)
  95. }
  96. if !reflect.DeepEqual(reqput.Value, kv.Value) {
  97. t.Errorf("expected value %v, got %v", reqput.Value, kv.Value)
  98. }
  99. }
  100. // TestV3DeleteRange tests various edge cases in the DeleteRange API.
  101. func TestV3DeleteRange(t *testing.T) {
  102. tests := []struct {
  103. keySet []string
  104. begin string
  105. end string
  106. wantSet [][]byte
  107. }{
  108. // delete middle
  109. {
  110. []string{"foo", "foo/abc", "fop"},
  111. "foo/", "fop",
  112. [][]byte{[]byte("foo"), []byte("fop")},
  113. },
  114. // no delete
  115. {
  116. []string{"foo", "foo/abc", "fop"},
  117. "foo/", "foo/",
  118. [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
  119. },
  120. // delete first
  121. {
  122. []string{"foo", "foo/abc", "fop"},
  123. "fo", "fop",
  124. [][]byte{[]byte("fop")},
  125. },
  126. // delete tail
  127. {
  128. []string{"foo", "foo/abc", "fop"},
  129. "foo/", "fos",
  130. [][]byte{[]byte("foo")},
  131. },
  132. // delete exact
  133. {
  134. []string{"foo", "foo/abc", "fop"},
  135. "foo/abc", "",
  136. [][]byte{[]byte("foo"), []byte("fop")},
  137. },
  138. // delete none, [x,x)
  139. {
  140. []string{"foo"},
  141. "foo", "foo",
  142. [][]byte{[]byte("foo")},
  143. },
  144. }
  145. for i, tt := range tests {
  146. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  147. kvc := pb.NewKVClient(clus.RandConn())
  148. ks := tt.keySet
  149. for j := range ks {
  150. reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
  151. _, err := kvc.Put(context.TODO(), reqput)
  152. if err != nil {
  153. t.Fatalf("couldn't put key (%v)", err)
  154. }
  155. }
  156. dreq := &pb.DeleteRangeRequest{
  157. Key: []byte(tt.begin),
  158. RangeEnd: []byte(tt.end)}
  159. dresp, err := kvc.DeleteRange(context.TODO(), dreq)
  160. if err != nil {
  161. t.Fatalf("couldn't delete range on test %d (%v)", i, err)
  162. }
  163. rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
  164. rresp, err := kvc.Range(context.TODO(), rreq)
  165. if err != nil {
  166. t.Errorf("couldn't get range on test %v (%v)", i, err)
  167. }
  168. if dresp.Header.Revision != rresp.Header.Revision {
  169. t.Errorf("expected revision %v, got %v",
  170. dresp.Header.Revision, rresp.Header.Revision)
  171. }
  172. keys := [][]byte{}
  173. for j := range rresp.Kvs {
  174. keys = append(keys, rresp.Kvs[j].Key)
  175. }
  176. if reflect.DeepEqual(tt.wantSet, keys) == false {
  177. t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
  178. }
  179. // can't defer because tcp ports will be in use
  180. clus.Terminate(t)
  181. }
  182. }
  183. // TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
  184. func TestV3WatchFromCurrentRevision(t *testing.T) {
  185. tests := []struct {
  186. putKeys []string
  187. watchRequest *pb.WatchRequest
  188. wresps []*pb.WatchResponse
  189. }{
  190. // watch the key, matching
  191. {
  192. []string{"foo"},
  193. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
  194. []*pb.WatchResponse{
  195. {
  196. Header: &pb.ResponseHeader{Revision: 1},
  197. Created: true,
  198. },
  199. {
  200. Header: &pb.ResponseHeader{Revision: 2},
  201. Created: false,
  202. Events: []*storagepb.Event{
  203. {
  204. Type: storagepb.PUT,
  205. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  206. },
  207. },
  208. },
  209. },
  210. },
  211. // watch the key, non-matching
  212. {
  213. []string{"foo"},
  214. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("helloworld")}},
  215. []*pb.WatchResponse{
  216. {
  217. Header: &pb.ResponseHeader{Revision: 1},
  218. Created: true,
  219. },
  220. },
  221. },
  222. // watch the prefix, matching
  223. {
  224. []string{"fooLong"},
  225. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
  226. []*pb.WatchResponse{
  227. {
  228. Header: &pb.ResponseHeader{Revision: 1},
  229. Created: true,
  230. },
  231. {
  232. Header: &pb.ResponseHeader{Revision: 2},
  233. Created: false,
  234. Events: []*storagepb.Event{
  235. {
  236. Type: storagepb.PUT,
  237. Kv: &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  238. },
  239. },
  240. },
  241. },
  242. },
  243. // watch the prefix, non-matching
  244. {
  245. []string{"foo"},
  246. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("helloworld")}},
  247. []*pb.WatchResponse{
  248. {
  249. Header: &pb.ResponseHeader{Revision: 1},
  250. Created: true,
  251. },
  252. },
  253. },
  254. // multiple puts, one watcher with matching key
  255. {
  256. []string{"foo", "foo", "foo"},
  257. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
  258. []*pb.WatchResponse{
  259. {
  260. Header: &pb.ResponseHeader{Revision: 1},
  261. Created: true,
  262. },
  263. {
  264. Header: &pb.ResponseHeader{Revision: 2},
  265. Created: false,
  266. Events: []*storagepb.Event{
  267. {
  268. Type: storagepb.PUT,
  269. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  270. },
  271. },
  272. },
  273. {
  274. Header: &pb.ResponseHeader{Revision: 3},
  275. Created: false,
  276. Events: []*storagepb.Event{
  277. {
  278. Type: storagepb.PUT,
  279. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
  280. },
  281. },
  282. },
  283. {
  284. Header: &pb.ResponseHeader{Revision: 4},
  285. Created: false,
  286. Events: []*storagepb.Event{
  287. {
  288. Type: storagepb.PUT,
  289. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
  290. },
  291. },
  292. },
  293. },
  294. },
  295. // multiple puts, one watcher with matching prefix
  296. {
  297. []string{"foo", "foo", "foo"},
  298. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
  299. []*pb.WatchResponse{
  300. {
  301. Header: &pb.ResponseHeader{Revision: 1},
  302. Created: true,
  303. },
  304. {
  305. Header: &pb.ResponseHeader{Revision: 2},
  306. Created: false,
  307. Events: []*storagepb.Event{
  308. {
  309. Type: storagepb.PUT,
  310. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  311. },
  312. },
  313. },
  314. {
  315. Header: &pb.ResponseHeader{Revision: 3},
  316. Created: false,
  317. Events: []*storagepb.Event{
  318. {
  319. Type: storagepb.PUT,
  320. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
  321. },
  322. },
  323. },
  324. {
  325. Header: &pb.ResponseHeader{Revision: 4},
  326. Created: false,
  327. Events: []*storagepb.Event{
  328. {
  329. Type: storagepb.PUT,
  330. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
  331. },
  332. },
  333. },
  334. },
  335. },
  336. }
  337. for i, tt := range tests {
  338. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  339. wAPI := pb.NewWatchClient(clus.RandConn())
  340. wStream, err := wAPI.Watch(context.TODO())
  341. if err != nil {
  342. t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
  343. }
  344. if err := wStream.Send(tt.watchRequest); err != nil {
  345. t.Fatalf("#%d: wStream.Send error: %v", i, err)
  346. }
  347. go func() {
  348. for _, k := range tt.putKeys {
  349. kvc := pb.NewKVClient(clus.RandConn())
  350. req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
  351. if _, err := kvc.Put(context.TODO(), req); err != nil {
  352. t.Fatalf("#%d: couldn't put key (%v)", i, err)
  353. }
  354. }
  355. }()
  356. var createdWatchId int64
  357. for j, wresp := range tt.wresps {
  358. resp, err := wStream.Recv()
  359. if err != nil {
  360. t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
  361. }
  362. if resp.Header == nil {
  363. t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
  364. }
  365. if resp.Header.Revision != wresp.Header.Revision {
  366. t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
  367. }
  368. if wresp.Created != resp.Created {
  369. t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
  370. }
  371. if resp.Created {
  372. createdWatchId = resp.WatchId
  373. }
  374. if resp.WatchId != createdWatchId {
  375. t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
  376. }
  377. if !reflect.DeepEqual(resp.Events, wresp.Events) {
  378. t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
  379. }
  380. }
  381. rok, nr := WaitResponse(wStream, 1*time.Second)
  382. if !rok {
  383. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  384. }
  385. // can't defer because tcp ports will be in use
  386. clus.Terminate(t)
  387. }
  388. }
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
// startRev 0 watches from the current revision (see testV3WatchCancel).
func TestV3WatchCancelSynced(t *testing.T) {
	testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
// startRev 1 forces the watcher to catch up from an old revision first.
func TestV3WatchCancelUnsynced(t *testing.T) {
	testV3WatchCancel(t, 1)
}
  397. func testV3WatchCancel(t *testing.T, startRev int64) {
  398. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  399. wAPI := pb.NewWatchClient(clus.RandConn())
  400. wStream, errW := wAPI.Watch(context.TODO())
  401. if errW != nil {
  402. t.Fatalf("wAPI.Watch error: %v", errW)
  403. }
  404. if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
  405. t.Fatalf("wStream.Send error: %v", err)
  406. }
  407. wresp, errR := wStream.Recv()
  408. if errR != nil {
  409. t.Errorf("wStream.Recv error: %v", errR)
  410. }
  411. if !wresp.Created {
  412. t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
  413. }
  414. if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
  415. t.Fatalf("wStream.Send error: %v", err)
  416. }
  417. cresp, err := wStream.Recv()
  418. if err != nil {
  419. t.Errorf("wStream.Recv error: %v", err)
  420. }
  421. if !cresp.Canceled {
  422. t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
  423. }
  424. kvc := pb.NewKVClient(clus.RandConn())
  425. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
  426. t.Errorf("couldn't put key (%v)", err)
  427. }
  428. // watch got canceled, so this should block
  429. rok, nr := WaitResponse(wStream, 1*time.Second)
  430. if !rok {
  431. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  432. }
  433. clus.Terminate(t)
  434. }
// TestV3WatchMultipleWatchersSynced runs the multiple-watchers scenario
// from the current revision (startRev 0).
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
	testV3WatchMultipleWatchers(t, 0)
}
// TestV3WatchMultipleWatchersUnsynced runs the multiple-watchers scenario
// from an old revision (startRev 1), exercising catch-up delivery.
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
	testV3WatchMultipleWatchers(t, 1)
}
  441. // testV3WatchMultipleWatchers tests multiple watchers on the same key
  442. // and one watcher with matching prefix. It first puts the key
  443. // that matches all watchers, and another key that matches only
  444. // one watcher to test if it receives expected events.
  445. func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
  446. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  447. wAPI := pb.NewWatchClient(clus.RandConn())
  448. kvc := pb.NewKVClient(clus.RandConn())
  449. wStream, errW := wAPI.Watch(context.TODO())
  450. if errW != nil {
  451. t.Fatalf("wAPI.Watch error: %v", errW)
  452. }
  453. watchKeyN := 4
  454. for i := 0; i < watchKeyN+1; i++ {
  455. var wreq *pb.WatchRequest
  456. if i < watchKeyN {
  457. wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}
  458. } else {
  459. wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("fo"), StartRevision: startRev}}
  460. }
  461. if err := wStream.Send(wreq); err != nil {
  462. t.Fatalf("wStream.Send error: %v", err)
  463. }
  464. }
  465. ids := make(map[int64]struct{})
  466. for i := 0; i < watchKeyN+1; i++ {
  467. wresp, err := wStream.Recv()
  468. if err != nil {
  469. t.Fatalf("wStream.Recv error: %v", err)
  470. }
  471. if !wresp.Created {
  472. t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
  473. }
  474. ids[wresp.WatchId] = struct{}{}
  475. }
  476. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
  477. t.Fatalf("couldn't put key (%v)", err)
  478. }
  479. for i := 0; i < watchKeyN+1; i++ {
  480. wresp, err := wStream.Recv()
  481. if err != nil {
  482. t.Fatalf("wStream.Recv error: %v", err)
  483. }
  484. if _, ok := ids[wresp.WatchId]; !ok {
  485. t.Errorf("watchId %d is not created!", wresp.WatchId)
  486. } else {
  487. delete(ids, wresp.WatchId)
  488. }
  489. if len(wresp.Events) == 0 {
  490. t.Errorf("#%d: no events received", i)
  491. }
  492. for _, ev := range wresp.Events {
  493. if string(ev.Kv.Key) != "foo" {
  494. t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
  495. }
  496. if string(ev.Kv.Value) != "bar" {
  497. t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
  498. }
  499. }
  500. }
  501. // now put one key that has only one matching watcher
  502. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
  503. t.Fatalf("couldn't put key (%v)", err)
  504. }
  505. wresp, err := wStream.Recv()
  506. if err != nil {
  507. t.Errorf("wStream.Recv error: %v", err)
  508. }
  509. if len(wresp.Events) != 1 {
  510. t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
  511. }
  512. if string(wresp.Events[0].Kv.Key) != "fo" {
  513. t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
  514. }
  515. // now Recv should block because there is no more events coming
  516. rok, nr := WaitResponse(wStream, 1*time.Second)
  517. if !rok {
  518. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  519. }
  520. clus.Terminate(t)
  521. }
// TestV3WatchMultipleEventsTxnSynced runs the txn multi-event scenario
// from the current revision (startRev 0).
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
	testV3WatchMultipleEventsTxn(t, 0)
}
// TestV3WatchMultipleEventsTxnUnsynced runs the txn multi-event scenario
// from an old revision (startRev 1), exercising catch-up delivery.
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
	testV3WatchMultipleEventsTxn(t, 1)
}
  528. // testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events.
  529. func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
  530. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  531. wAPI := pb.NewWatchClient(clus.RandConn())
  532. wStream, wErr := wAPI.Watch(context.TODO())
  533. if wErr != nil {
  534. t.Fatalf("wAPI.Watch error: %v", wErr)
  535. }
  536. if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: startRev}}); err != nil {
  537. t.Fatalf("wStream.Send error: %v", err)
  538. }
  539. kvc := pb.NewKVClient(clus.RandConn())
  540. txn := pb.TxnRequest{}
  541. for i := 0; i < 3; i++ {
  542. ru := &pb.RequestUnion{}
  543. ru.RequestPut = &pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}
  544. txn.Success = append(txn.Success, ru)
  545. }
  546. tresp, err := kvc.Txn(context.Background(), &txn)
  547. if err != nil {
  548. t.Fatalf("kvc.Txn error: %v", err)
  549. }
  550. if !tresp.Succeeded {
  551. t.Fatalf("kvc.Txn failed: %+v", tresp)
  552. }
  553. events := []*storagepb.Event{}
  554. for len(events) < 3 {
  555. resp, err := wStream.Recv()
  556. if err != nil {
  557. t.Errorf("wStream.Recv error: %v", err)
  558. }
  559. if resp.Created {
  560. continue
  561. }
  562. events = append(events, resp.Events...)
  563. }
  564. sort.Sort(eventsSortByKey(events))
  565. wevents := []*storagepb.Event{
  566. {
  567. Type: storagepb.PUT,
  568. Kv: &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  569. },
  570. {
  571. Type: storagepb.PUT,
  572. Kv: &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  573. },
  574. {
  575. Type: storagepb.PUT,
  576. Kv: &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  577. },
  578. }
  579. if !reflect.DeepEqual(events, wevents) {
  580. t.Errorf("events got = %+v, want = %+v", events, wevents)
  581. }
  582. rok, nr := WaitResponse(wStream, 1*time.Second)
  583. if !rok {
  584. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  585. }
  586. // can't defer because tcp ports will be in use
  587. clus.Terminate(t)
  588. }
// eventsSortByKey implements sort.Interface, ordering watch events by
// their key bytes so event slices gathered from multiple responses can
// be compared deterministically.
type eventsSortByKey []*storagepb.Event

func (evs eventsSortByKey) Len() int           { return len(evs) }
func (evs eventsSortByKey) Swap(i, j int)      { evs[i], evs[j] = evs[j], evs[i] }
func (evs eventsSortByKey) Less(i, j int) bool { return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 }
  593. func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
  594. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  595. defer clus.Terminate(t)
  596. kvc := pb.NewKVClient(clus.RandConn())
  597. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
  598. t.Fatalf("couldn't put key (%v)", err)
  599. }
  600. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
  601. t.Fatalf("couldn't put key (%v)", err)
  602. }
  603. wAPI := pb.NewWatchClient(clus.RandConn())
  604. wStream, wErr := wAPI.Watch(context.TODO())
  605. if wErr != nil {
  606. t.Fatalf("wAPI.Watch error: %v", wErr)
  607. }
  608. if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: 1}}); err != nil {
  609. t.Fatalf("wStream.Send error: %v", err)
  610. }
  611. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
  612. t.Fatalf("couldn't put key (%v)", err)
  613. }
  614. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
  615. t.Fatalf("couldn't put key (%v)", err)
  616. }
  617. allWevents := []*storagepb.Event{
  618. {
  619. Type: storagepb.PUT,
  620. Kv: &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  621. },
  622. {
  623. Type: storagepb.PUT,
  624. Kv: &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
  625. },
  626. {
  627. Type: storagepb.PUT,
  628. Kv: &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
  629. },
  630. {
  631. Type: storagepb.PUT,
  632. Kv: &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
  633. },
  634. }
  635. events := []*storagepb.Event{}
  636. for len(events) < 4 {
  637. resp, err := wStream.Recv()
  638. if err != nil {
  639. t.Errorf("wStream.Recv error: %v", err)
  640. }
  641. if resp.Created {
  642. continue
  643. }
  644. events = append(events, resp.Events...)
  645. // if PUT requests are committed by now, first receive would return
  646. // multiple events, but if not, it returns a single event. In SSD,
  647. // it should return 4 events at once.
  648. }
  649. if !reflect.DeepEqual(events, allWevents) {
  650. t.Errorf("events got = %+v, want = %+v", events, allWevents)
  651. }
  652. rok, nr := WaitResponse(wStream, 1*time.Second)
  653. if !rok {
  654. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  655. }
  656. }
// TestV3WatchMultipleStreamsSynced runs the multi-stream scenario
// from the current revision (startRev 0).
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
	testV3WatchMultipleStreams(t, 0)
}
// TestV3WatchMultipleStreamsUnsynced runs the multi-stream scenario
// from an old revision (startRev 1), exercising catch-up delivery.
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
	testV3WatchMultipleStreams(t, 1)
}
  663. // testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
  664. func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
  665. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  666. wAPI := pb.NewWatchClient(clus.RandConn())
  667. kvc := pb.NewKVClient(clus.RandConn())
  668. streams := make([]pb.Watch_WatchClient, 5)
  669. for i := range streams {
  670. wStream, errW := wAPI.Watch(context.TODO())
  671. if errW != nil {
  672. t.Fatalf("wAPI.Watch error: %v", errW)
  673. }
  674. if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
  675. t.Fatalf("wStream.Send error: %v", err)
  676. }
  677. streams[i] = wStream
  678. }
  679. for _, wStream := range streams {
  680. wresp, err := wStream.Recv()
  681. if err != nil {
  682. t.Fatalf("wStream.Recv error: %v", err)
  683. }
  684. if !wresp.Created {
  685. t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
  686. }
  687. }
  688. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
  689. t.Fatalf("couldn't put key (%v)", err)
  690. }
  691. var wg sync.WaitGroup
  692. wg.Add(len(streams))
  693. wevents := []*storagepb.Event{
  694. {
  695. Type: storagepb.PUT,
  696. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  697. },
  698. }
  699. for i := range streams {
  700. go func(i int) {
  701. defer wg.Done()
  702. wStream := streams[i]
  703. wresp, err := wStream.Recv()
  704. if err != nil {
  705. t.Fatalf("wStream.Recv error: %v", err)
  706. }
  707. if wresp.WatchId != 0 {
  708. t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
  709. }
  710. if !reflect.DeepEqual(wresp.Events, wevents) {
  711. t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
  712. }
  713. // now Recv should block because there is no more events coming
  714. rok, nr := WaitResponse(wStream, 1*time.Second)
  715. if !rok {
  716. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  717. }
  718. }(i)
  719. }
  720. wg.Wait()
  721. clus.Terminate(t)
  722. }
// WaitResponse waits on the given stream for given duration.
// If there is no more events, true and a nil response will be
// returned closing the WatchClient stream. Or the response will
// be returned.
func WaitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		// on Recv error (e.g. stream closed) resp is nil; the error is
		// deliberately ignored — a nil send below signals "no response"
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		// a response arrived before the timeout: the stream was not idle
		return false, nr
	case <-time.After(timeout):
	}
	// timed out: close the send side so the pending Recv above unblocks,
	// then drain the channel to avoid leaking the goroutine
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		// a non-nil late response (or a closed channel) still counts as
		// unexpected activity on the stream
		return false, rv
	}
	return true, nil
}
// TestV3RangeRequest exercises the v3 Range RPC against a 3-member cluster:
// single-key lookups, key-range scans, reads at historical revisions,
// limits (and the More flag), and server-side sorting.
func TestV3RangeRequest(t *testing.T) {
	tests := []struct {
		putKeys []string          // keys written (value "bar") before issuing the requests
		reqs    []pb.RangeRequest // range requests to issue, in order
		wresps  [][]string        // expected returned keys, per request
		wmores  []bool            // expected More flag, per request
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},
			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{"b", "c"},
				{},
				{},
				{},
			},
			[]bool{false, false, false, false, false},
		},
		// revision
		// Each Put bumps the store revision by one; revision 1 is the
		// empty store, revision 2 holds only "a", and so on. Revision 0
		// means "latest".
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},
			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		// "c" is put twice, so its mod revision is the newest while its
		// create revision sits between "a"/"b"/"d" and the second write.
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				{
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},
			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
			},
			[]bool{true, true, true, true, false},
		},
	}
	for i, tt := range tests {
		// A fresh cluster per case so revisions start from a known state.
		clus := newClusterGRPC(t, &clusterConfig{size: 3})
		for _, k := range tt.putKeys {
			kvc := pb.NewKVClient(clus.RandConn())
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}
		for j, req := range tt.reqs {
			// Issue each request through a random member to confirm the
			// whole cluster agrees on the result.
			kvc := pb.NewKVClient(clus.RandConn())
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
			}
			// The header should carry the requested revision, or the
			// latest revision (one bump per Put, starting from 1) when
			// the request asked for revision 0.
			wrev := req.Revision
			if wrev == 0 {
				wrev = int64(len(tt.putKeys) + 1)
			}
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}
		clus.Terminate(t)
	}
}
  908. // TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
  909. func TestV3LeaseRevoke(t *testing.T) {
  910. testLeaseRemoveLeasedKey(t, func(clus *clusterV3, leaseID int64) error {
  911. lc := pb.NewLeaseClient(clus.RandConn())
  912. _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
  913. return err
  914. })
  915. }
  916. // TestV3LeaseCreateById ensures leases may be created by a given id.
  917. func TestV3LeaseCreateByID(t *testing.T) {
  918. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  919. defer clus.Terminate(t)
  920. // create fixed lease
  921. lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  922. context.TODO(),
  923. &pb.LeaseCreateRequest{ID: 1, TTL: 1})
  924. if err != nil {
  925. t.Errorf("could not create lease 1 (%v)", err)
  926. }
  927. if lresp.ID != 1 {
  928. t.Errorf("got id %v, wanted id %v", lresp.ID)
  929. }
  930. // create duplicate fixed lease
  931. lresp, err = pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  932. context.TODO(),
  933. &pb.LeaseCreateRequest{ID: 1, TTL: 1})
  934. if err != nil {
  935. t.Error(err)
  936. }
  937. if lresp.ID != 0 || lresp.Error != lease.ErrLeaseExists.Error() {
  938. t.Errorf("got id %v, wanted id 0 (%v)", lresp.ID, lresp.Error)
  939. }
  940. // create fresh fixed lease
  941. lresp, err = pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  942. context.TODO(),
  943. &pb.LeaseCreateRequest{ID: 2, TTL: 1})
  944. if err != nil {
  945. t.Errorf("could not create lease 2 (%v)", err)
  946. }
  947. if lresp.ID != 2 {
  948. t.Errorf("got id %v, wanted id %v", lresp.ID)
  949. }
  950. }
  951. // TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
  952. func TestV3LeaseKeepAlive(t *testing.T) {
  953. testLeaseRemoveLeasedKey(t, func(clus *clusterV3, leaseID int64) error {
  954. lc := pb.NewLeaseClient(clus.RandConn())
  955. lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
  956. lac, err := lc.LeaseKeepAlive(context.TODO())
  957. if err != nil {
  958. return err
  959. }
  960. defer lac.CloseSend()
  961. // renew long enough so lease would've expired otherwise
  962. for i := 0; i < 3; i++ {
  963. if err = lac.Send(lreq); err != nil {
  964. return err
  965. }
  966. lresp, rxerr := lac.Recv()
  967. if rxerr != nil {
  968. return rxerr
  969. }
  970. if lresp.ID != leaseID {
  971. return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
  972. }
  973. time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
  974. }
  975. _, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
  976. return err
  977. })
  978. }
  979. // TestV3LeaseExists creates a lease on a random client, then sends a keepalive on another
  980. // client to confirm it's visible to the whole cluster.
  981. func TestV3LeaseExists(t *testing.T) {
  982. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  983. defer clus.Terminate(t)
  984. // create lease
  985. lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  986. context.TODO(),
  987. &pb.LeaseCreateRequest{TTL: 30})
  988. if err != nil {
  989. t.Fatal(err)
  990. }
  991. if lresp.Error != "" {
  992. t.Fatal(lresp.Error)
  993. }
  994. // confirm keepalive
  995. lac, err := pb.NewLeaseClient(clus.RandConn()).LeaseKeepAlive(context.TODO())
  996. if err != nil {
  997. t.Fatal(err)
  998. }
  999. defer lac.CloseSend()
  1000. if err = lac.Send(&pb.LeaseKeepAliveRequest{ID: lresp.ID}); err != nil {
  1001. t.Fatal(err)
  1002. }
  1003. if _, err = lac.Recv(); err != nil {
  1004. t.Fatal(err)
  1005. }
  1006. }
  1007. // acquireLeaseAndKey creates a new lease and creates an attached key.
  1008. func acquireLeaseAndKey(clus *clusterV3, key string) (int64, error) {
  1009. // create lease
  1010. lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
  1011. context.TODO(),
  1012. &pb.LeaseCreateRequest{TTL: 1})
  1013. if err != nil {
  1014. return 0, err
  1015. }
  1016. if lresp.Error != "" {
  1017. return 0, fmt.Errorf(lresp.Error)
  1018. }
  1019. // attach to key
  1020. put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID}
  1021. if _, err := pb.NewKVClient(clus.RandConn()).Put(context.TODO(), put); err != nil {
  1022. return 0, err
  1023. }
  1024. return lresp.ID, nil
  1025. }
  1026. // testLeaseRemoveLeasedKey performs some action while holding a lease with an
  1027. // attached key "foo", then confirms the key is gone.
  1028. func testLeaseRemoveLeasedKey(t *testing.T, act func(*clusterV3, int64) error) {
  1029. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  1030. defer clus.Terminate(t)
  1031. leaseID, err := acquireLeaseAndKey(clus, "foo")
  1032. if err != nil {
  1033. t.Fatal(err)
  1034. }
  1035. if err = act(clus, leaseID); err != nil {
  1036. t.Fatal(err)
  1037. }
  1038. // confirm no key
  1039. rreq := &pb.RangeRequest{Key: []byte("foo")}
  1040. rresp, err := pb.NewKVClient(clus.RandConn()).Range(context.TODO(), rreq)
  1041. if err != nil {
  1042. t.Fatal(err)
  1043. }
  1044. if len(rresp.Kvs) != 0 {
  1045. t.Fatalf("lease removed but key remains")
  1046. }
  1047. }