v3_grpc_test.go 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837
  1. // Copyright 2016 CoreOS, Inc.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
// limitations under the License.
  14. package integration
  15. import (
  16. "bytes"
  17. "fmt"
  18. "math/rand"
  19. "reflect"
  20. "sort"
  21. "testing"
  22. "time"
  23. "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
  24. "github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/grpc"
  25. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  26. "github.com/coreos/etcd/storage/storagepb"
  27. )
// clusterV3 wraps a launched test cluster together with one gRPC
// client connection per cluster member, so v3 API tests can issue
// requests against any member directly.
type clusterV3 struct {
	*cluster
	// conns holds one client connection per member, in member order.
	conns []*grpc.ClientConn
}
  32. // newClusterGRPC returns a launched cluster with a grpc client connection
  33. // for each cluster member.
  34. func newClusterGRPC(t *testing.T, cfg *clusterConfig) *clusterV3 {
  35. cfg.useV3 = true
  36. cfg.useGRPC = true
  37. clus := &clusterV3{cluster: NewClusterByConfig(t, cfg)}
  38. for _, m := range clus.Members {
  39. conn, err := NewGRPCClient(m)
  40. if err != nil {
  41. t.Fatal(err)
  42. }
  43. clus.conns = append(clus.conns, conn)
  44. }
  45. clus.Launch(t)
  46. return clus
  47. }
  48. func (c *clusterV3) Terminate(t *testing.T) {
  49. for _, conn := range c.conns {
  50. if err := conn.Close(); err != nil {
  51. t.Error(err)
  52. }
  53. }
  54. c.cluster.Terminate(t)
  55. }
  56. func (c *clusterV3) RandConn() *grpc.ClientConn {
  57. return c.conns[rand.Intn(len(c.conns))]
  58. }
  59. // TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
  60. // overwrites it, then checks that the change was applied.
  61. func TestV3PutOverwrite(t *testing.T) {
  62. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  63. defer clus.Terminate(t)
  64. kvc := pb.NewKVClient(clus.RandConn())
  65. key := []byte("foo")
  66. reqput := &pb.PutRequest{Key: key, Value: []byte("bar")}
  67. respput, err := kvc.Put(context.TODO(), reqput)
  68. if err != nil {
  69. t.Fatalf("couldn't put key (%v)", err)
  70. }
  71. // overwrite
  72. reqput.Value = []byte("baz")
  73. respput2, err := kvc.Put(context.TODO(), reqput)
  74. if err != nil {
  75. t.Fatalf("couldn't put key (%v)", err)
  76. }
  77. if respput2.Header.Revision <= respput.Header.Revision {
  78. t.Fatalf("expected newer revision on overwrite, got %v <= %v",
  79. respput2.Header.Revision, respput.Header.Revision)
  80. }
  81. reqrange := &pb.RangeRequest{Key: key}
  82. resprange, err := kvc.Range(context.TODO(), reqrange)
  83. if err != nil {
  84. t.Fatalf("couldn't get key (%v)", err)
  85. }
  86. if len(resprange.Kvs) != 1 {
  87. t.Fatalf("expected 1 key, got %v", len(resprange.Kvs))
  88. }
  89. kv := resprange.Kvs[0]
  90. if kv.ModRevision <= kv.CreateRevision {
  91. t.Errorf("expected modRev > createRev, got %d <= %d",
  92. kv.ModRevision, kv.CreateRevision)
  93. }
  94. if !reflect.DeepEqual(reqput.Value, kv.Value) {
  95. t.Errorf("expected value %v, got %v", reqput.Value, kv.Value)
  96. }
  97. }
  98. // TestV3DeleteRange tests various edge cases in the DeleteRange API.
  99. func TestV3DeleteRange(t *testing.T) {
  100. tests := []struct {
  101. keySet []string
  102. begin string
  103. end string
  104. wantSet [][]byte
  105. }{
  106. // delete middle
  107. {
  108. []string{"foo", "foo/abc", "fop"},
  109. "foo/", "fop",
  110. [][]byte{[]byte("foo"), []byte("fop")},
  111. },
  112. // no delete
  113. {
  114. []string{"foo", "foo/abc", "fop"},
  115. "foo/", "foo/",
  116. [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
  117. },
  118. // delete first
  119. {
  120. []string{"foo", "foo/abc", "fop"},
  121. "fo", "fop",
  122. [][]byte{[]byte("fop")},
  123. },
  124. // delete tail
  125. {
  126. []string{"foo", "foo/abc", "fop"},
  127. "foo/", "fos",
  128. [][]byte{[]byte("foo")},
  129. },
  130. // delete exact
  131. {
  132. []string{"foo", "foo/abc", "fop"},
  133. "foo/abc", "",
  134. [][]byte{[]byte("foo"), []byte("fop")},
  135. },
  136. // delete none, [x,x)
  137. {
  138. []string{"foo"},
  139. "foo", "foo",
  140. [][]byte{[]byte("foo")},
  141. },
  142. }
  143. for i, tt := range tests {
  144. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  145. kvc := pb.NewKVClient(clus.RandConn())
  146. ks := tt.keySet
  147. for j := range ks {
  148. reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
  149. _, err := kvc.Put(context.TODO(), reqput)
  150. if err != nil {
  151. t.Fatalf("couldn't put key (%v)", err)
  152. }
  153. }
  154. dreq := &pb.DeleteRangeRequest{
  155. Key: []byte(tt.begin),
  156. RangeEnd: []byte(tt.end)}
  157. dresp, err := kvc.DeleteRange(context.TODO(), dreq)
  158. if err != nil {
  159. t.Fatalf("couldn't delete range on test %d (%v)", i, err)
  160. }
  161. rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
  162. rresp, err := kvc.Range(context.TODO(), rreq)
  163. if err != nil {
  164. t.Errorf("couldn't get range on test %v (%v)", i, err)
  165. }
  166. if dresp.Header.Revision != rresp.Header.Revision {
  167. t.Errorf("expected revision %v, got %v",
  168. dresp.Header.Revision, rresp.Header.Revision)
  169. }
  170. keys := [][]byte{}
  171. for j := range rresp.Kvs {
  172. keys = append(keys, rresp.Kvs[j].Key)
  173. }
  174. if reflect.DeepEqual(tt.wantSet, keys) == false {
  175. t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
  176. }
  177. // can't defer because tcp ports will be in use
  178. clus.Terminate(t)
  179. }
  180. }
  181. // TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
  182. func TestV3WatchFromCurrentRevision(t *testing.T) {
  183. tests := []struct {
  184. putKeys []string
  185. watchRequest *pb.WatchRequest
  186. wresps []*pb.WatchResponse
  187. }{
  188. // watch the key, matching
  189. {
  190. []string{"foo"},
  191. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
  192. []*pb.WatchResponse{
  193. {
  194. Header: &pb.ResponseHeader{Revision: 1},
  195. Created: true,
  196. },
  197. {
  198. Header: &pb.ResponseHeader{Revision: 2},
  199. Created: false,
  200. Events: []*storagepb.Event{
  201. {
  202. Type: storagepb.PUT,
  203. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  204. },
  205. },
  206. },
  207. },
  208. },
  209. // watch the key, non-matching
  210. {
  211. []string{"foo"},
  212. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("helloworld")}},
  213. []*pb.WatchResponse{
  214. {
  215. Header: &pb.ResponseHeader{Revision: 1},
  216. Created: true,
  217. },
  218. },
  219. },
  220. // watch the prefix, matching
  221. {
  222. []string{"fooLong"},
  223. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
  224. []*pb.WatchResponse{
  225. {
  226. Header: &pb.ResponseHeader{Revision: 1},
  227. Created: true,
  228. },
  229. {
  230. Header: &pb.ResponseHeader{Revision: 2},
  231. Created: false,
  232. Events: []*storagepb.Event{
  233. {
  234. Type: storagepb.PUT,
  235. Kv: &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  236. },
  237. },
  238. },
  239. },
  240. },
  241. // watch the prefix, non-matching
  242. {
  243. []string{"foo"},
  244. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("helloworld")}},
  245. []*pb.WatchResponse{
  246. {
  247. Header: &pb.ResponseHeader{Revision: 1},
  248. Created: true,
  249. },
  250. },
  251. },
  252. // multiple puts, one watcher with matching key
  253. {
  254. []string{"foo", "foo", "foo"},
  255. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
  256. []*pb.WatchResponse{
  257. {
  258. Header: &pb.ResponseHeader{Revision: 1},
  259. Created: true,
  260. },
  261. {
  262. Header: &pb.ResponseHeader{Revision: 2},
  263. Created: false,
  264. Events: []*storagepb.Event{
  265. {
  266. Type: storagepb.PUT,
  267. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  268. },
  269. },
  270. },
  271. {
  272. Header: &pb.ResponseHeader{Revision: 3},
  273. Created: false,
  274. Events: []*storagepb.Event{
  275. {
  276. Type: storagepb.PUT,
  277. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
  278. },
  279. },
  280. },
  281. {
  282. Header: &pb.ResponseHeader{Revision: 4},
  283. Created: false,
  284. Events: []*storagepb.Event{
  285. {
  286. Type: storagepb.PUT,
  287. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
  288. },
  289. },
  290. },
  291. },
  292. },
  293. // multiple puts, one watcher with matching prefix
  294. {
  295. []string{"foo", "foo", "foo"},
  296. &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
  297. []*pb.WatchResponse{
  298. {
  299. Header: &pb.ResponseHeader{Revision: 1},
  300. Created: true,
  301. },
  302. {
  303. Header: &pb.ResponseHeader{Revision: 2},
  304. Created: false,
  305. Events: []*storagepb.Event{
  306. {
  307. Type: storagepb.PUT,
  308. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  309. },
  310. },
  311. },
  312. {
  313. Header: &pb.ResponseHeader{Revision: 3},
  314. Created: false,
  315. Events: []*storagepb.Event{
  316. {
  317. Type: storagepb.PUT,
  318. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
  319. },
  320. },
  321. },
  322. {
  323. Header: &pb.ResponseHeader{Revision: 4},
  324. Created: false,
  325. Events: []*storagepb.Event{
  326. {
  327. Type: storagepb.PUT,
  328. Kv: &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
  329. },
  330. },
  331. },
  332. },
  333. },
  334. // TODO: watch and receive multiple-events from synced (need Txn)
  335. }
  336. for i, tt := range tests {
  337. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  338. wAPI := pb.NewWatchClient(clus.RandConn())
  339. wStream, err := wAPI.Watch(context.TODO())
  340. if err != nil {
  341. t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
  342. }
  343. if err := wStream.Send(tt.watchRequest); err != nil {
  344. t.Fatalf("#%d: wStream.Send error: %v", i, err)
  345. }
  346. go func() {
  347. for _, k := range tt.putKeys {
  348. kvc := pb.NewKVClient(clus.RandConn())
  349. req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
  350. if _, err := kvc.Put(context.TODO(), req); err != nil {
  351. t.Fatalf("#%d: couldn't put key (%v)", i, err)
  352. }
  353. }
  354. }()
  355. var createdWatchId int64
  356. for j, wresp := range tt.wresps {
  357. resp, err := wStream.Recv()
  358. if err != nil {
  359. t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
  360. }
  361. if resp.Header == nil {
  362. t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
  363. }
  364. if resp.Header.Revision != wresp.Header.Revision {
  365. t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
  366. }
  367. if wresp.Created != resp.Created {
  368. t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
  369. }
  370. if resp.Created {
  371. createdWatchId = resp.WatchId
  372. }
  373. if resp.WatchId != createdWatchId {
  374. t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
  375. }
  376. if !reflect.DeepEqual(resp.Events, wresp.Events) {
  377. t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
  378. }
  379. }
  380. rok, nr := WaitResponse(wStream, 1*time.Second)
  381. if !rok {
  382. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  383. }
  384. // can't defer because tcp ports will be in use
  385. clus.Terminate(t)
  386. }
  387. }
  388. // TestV3WatchCancel tests Watch APIs cancellation.
  389. func TestV3WatchCancel(t *testing.T) {
  390. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  391. wAPI := pb.NewWatchClient(clus.RandConn())
  392. wStream, errW := wAPI.Watch(context.TODO())
  393. if errW != nil {
  394. t.Fatalf("wAPI.Watch error: %v", errW)
  395. }
  396. if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}); err != nil {
  397. t.Fatalf("wStream.Send error: %v", err)
  398. }
  399. wresp, errR := wStream.Recv()
  400. if errR != nil {
  401. t.Errorf("wStream.Recv error: %v", errR)
  402. }
  403. if !wresp.Created {
  404. t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
  405. }
  406. if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
  407. t.Fatalf("wStream.Send error: %v", err)
  408. }
  409. cresp, err := wStream.Recv()
  410. if err != nil {
  411. t.Errorf("wStream.Recv error: %v", err)
  412. }
  413. if !cresp.Canceled {
  414. t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
  415. }
  416. kvc := pb.NewKVClient(clus.RandConn())
  417. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
  418. t.Errorf("couldn't put key (%v)", err)
  419. }
  420. // watch got canceled, so this should block
  421. rok, nr := WaitResponse(wStream, 1*time.Second)
  422. if !rok {
  423. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  424. }
  425. clus.Terminate(t)
  426. }
  427. // TestV3WatchMultiple tests multiple watchers on the same key
  428. // and one watcher with matching prefix. It first puts the key
  429. // that matches all watchers, and another key that matches only
  430. // one watcher to test if it receives expected events.
  431. func TestV3WatchMultiple(t *testing.T) {
  432. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  433. wAPI := pb.NewWatchClient(clus.RandConn())
  434. kvc := pb.NewKVClient(clus.RandConn())
  435. wStream, errW := wAPI.Watch(context.TODO())
  436. if errW != nil {
  437. t.Fatalf("wAPI.Watch error: %v", errW)
  438. }
  439. watchKeyN := 4
  440. for i := 0; i < watchKeyN+1; i++ {
  441. var wreq *pb.WatchRequest
  442. if i < watchKeyN {
  443. wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}
  444. } else {
  445. wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("fo")}}
  446. }
  447. if err := wStream.Send(wreq); err != nil {
  448. t.Fatalf("wStream.Send error: %v", err)
  449. }
  450. }
  451. ids := make(map[int64]struct{})
  452. for i := 0; i < watchKeyN+1; i++ {
  453. wresp, err := wStream.Recv()
  454. if err != nil {
  455. t.Fatalf("wStream.Recv error: %v", err)
  456. }
  457. if !wresp.Created {
  458. t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
  459. }
  460. ids[wresp.WatchId] = struct{}{}
  461. }
  462. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
  463. t.Fatalf("couldn't put key (%v)", err)
  464. }
  465. for i := 0; i < watchKeyN+1; i++ {
  466. wresp, err := wStream.Recv()
  467. if err != nil {
  468. t.Fatalf("wStream.Recv error: %v", err)
  469. }
  470. if _, ok := ids[wresp.WatchId]; !ok {
  471. t.Errorf("watchId %d is not created!", wresp.WatchId)
  472. } else {
  473. delete(ids, wresp.WatchId)
  474. }
  475. if len(wresp.Events) == 0 {
  476. t.Errorf("#%d: no events received", i)
  477. }
  478. for _, ev := range wresp.Events {
  479. if string(ev.Kv.Key) != "foo" {
  480. t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
  481. }
  482. if string(ev.Kv.Value) != "bar" {
  483. t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
  484. }
  485. }
  486. }
  487. // now put one key that has only one matching watcher
  488. if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
  489. t.Fatalf("couldn't put key (%v)", err)
  490. }
  491. wresp, err := wStream.Recv()
  492. if err != nil {
  493. t.Errorf("wStream.Recv error: %v", err)
  494. }
  495. if len(wresp.Events) != 1 {
  496. t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
  497. }
  498. if string(wresp.Events[0].Kv.Key) != "fo" {
  499. t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
  500. }
  501. // now Recv should block because there is no more events coming
  502. rok, nr := WaitResponse(wStream, 1*time.Second)
  503. if !rok {
  504. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  505. }
  506. clus.Terminate(t)
  507. }
  508. // TestV3WatchMultipleEventsFromCurrentRevision tests Watch APIs from current revision
  509. // in cases it receives multiple events.
  510. func TestV3WatchMultipleEventsFromCurrentRevision(t *testing.T) {
  511. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  512. wAPI := pb.NewWatchClient(clus.RandConn())
  513. wStream, err := wAPI.Watch(context.TODO())
  514. if err != nil {
  515. t.Fatalf("wAPI.Watch error: %v", err)
  516. }
  517. if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}}); err != nil {
  518. t.Fatalf("wStream.Send error: %v", err)
  519. }
  520. kvc := pb.NewKVClient(clus.RandConn())
  521. txn := pb.TxnRequest{}
  522. for i := 0; i < 3; i++ {
  523. ru := &pb.RequestUnion{}
  524. ru.RequestPut = &pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}
  525. txn.Success = append(txn.Success, ru)
  526. }
  527. tresp, err := kvc.Txn(context.Background(), &txn)
  528. if err != nil {
  529. t.Fatalf("kvc.Txn error: %v", err)
  530. }
  531. if !tresp.Succeeded {
  532. t.Fatalf("kvc.Txn failed: %+v", tresp)
  533. }
  534. events := []*storagepb.Event{}
  535. for len(events) < 3 {
  536. resp, err := wStream.Recv()
  537. if err != nil {
  538. t.Errorf("wStream.Recv error: %v", err)
  539. }
  540. if resp.Created {
  541. continue
  542. }
  543. events = append(events, resp.Events...)
  544. }
  545. sort.Sort(eventsSortByKey(events))
  546. wevents := []*storagepb.Event{
  547. {
  548. Type: storagepb.PUT,
  549. Kv: &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  550. },
  551. {
  552. Type: storagepb.PUT,
  553. Kv: &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  554. },
  555. {
  556. Type: storagepb.PUT,
  557. Kv: &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
  558. },
  559. }
  560. if !reflect.DeepEqual(events, wevents) {
  561. t.Errorf("events got = %+v, want = %+v", events, wevents)
  562. }
  563. rok, nr := WaitResponse(wStream, 1*time.Second)
  564. if !rok {
  565. t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
  566. }
  567. // can't defer because tcp ports will be in use
  568. clus.Terminate(t)
  569. }
// eventsSortByKey implements sort.Interface over a slice of events,
// ordering them by their key bytes ascending.
type eventsSortByKey []*storagepb.Event

func (evs eventsSortByKey) Len() int           { return len(evs) }
func (evs eventsSortByKey) Swap(i, j int)      { evs[i], evs[j] = evs[j], evs[i] }
func (evs eventsSortByKey) Less(i, j int) bool { return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 }
// WaitResponse waits on the given stream for given duration.
// If there is no more events, true and a nil response will be
// returned closing the WatchClient stream. Or the response will
// be returned.
func WaitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		// Recv blocks until a response arrives or the stream closes;
		// the error is deliberately dropped — a nil resp stands in for it.
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		// a response arrived before the timeout: events were still pending
		return false, nr
	case <-time.After(timeout):
	}
	// timed out: close the send side so the server tears down the stream
	// and the blocked Recv in the goroutine above unblocks
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		// NOTE(review): rCh is never closed, so ok is always true here;
		// the rv != nil case covers a response racing with the timeout.
		return false, rv
	}
	return true, nil
}
  596. func TestV3RangeRequest(t *testing.T) {
  597. tests := []struct {
  598. putKeys []string
  599. reqs []pb.RangeRequest
  600. wresps [][]string
  601. wmores []bool
  602. }{
  603. // single key
  604. {
  605. []string{"foo", "bar"},
  606. []pb.RangeRequest{
  607. // exists
  608. {Key: []byte("foo")},
  609. // doesn't exist
  610. {Key: []byte("baz")},
  611. },
  612. [][]string{
  613. {"foo"},
  614. {},
  615. },
  616. []bool{false, false},
  617. },
  618. // multi-key
  619. {
  620. []string{"a", "b", "c", "d", "e"},
  621. []pb.RangeRequest{
  622. // all in range
  623. {Key: []byte("a"), RangeEnd: []byte("z")},
  624. // [b, d)
  625. {Key: []byte("b"), RangeEnd: []byte("d")},
  626. // out of range
  627. {Key: []byte("f"), RangeEnd: []byte("z")},
  628. // [c,c) = empty
  629. {Key: []byte("c"), RangeEnd: []byte("c")},
  630. // [d, b) = empty
  631. {Key: []byte("d"), RangeEnd: []byte("b")},
  632. },
  633. [][]string{
  634. {"a", "b", "c", "d", "e"},
  635. {"b", "c"},
  636. {},
  637. {},
  638. {},
  639. },
  640. []bool{false, false, false, false, false},
  641. },
  642. // revision
  643. {
  644. []string{"a", "b", "c", "d", "e"},
  645. []pb.RangeRequest{
  646. {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
  647. {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
  648. {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
  649. {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
  650. },
  651. [][]string{
  652. {"a", "b", "c", "d", "e"},
  653. {},
  654. {"a"},
  655. {"a", "b"},
  656. },
  657. []bool{false, false, false, false},
  658. },
  659. // limit
  660. {
  661. []string{"foo", "bar"},
  662. []pb.RangeRequest{
  663. // more
  664. {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
  665. // no more
  666. {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
  667. },
  668. [][]string{
  669. {"bar"},
  670. {"bar", "foo"},
  671. },
  672. []bool{true, false},
  673. },
  674. // sort
  675. {
  676. []string{"b", "a", "c", "d", "c"},
  677. []pb.RangeRequest{
  678. {
  679. Key: []byte("a"), RangeEnd: []byte("z"),
  680. Limit: 1,
  681. SortOrder: pb.RangeRequest_ASCEND,
  682. SortTarget: pb.RangeRequest_KEY,
  683. },
  684. {
  685. Key: []byte("a"), RangeEnd: []byte("z"),
  686. Limit: 1,
  687. SortOrder: pb.RangeRequest_DESCEND,
  688. SortTarget: pb.RangeRequest_KEY,
  689. },
  690. {
  691. Key: []byte("a"), RangeEnd: []byte("z"),
  692. Limit: 1,
  693. SortOrder: pb.RangeRequest_ASCEND,
  694. SortTarget: pb.RangeRequest_CREATE,
  695. },
  696. {
  697. Key: []byte("a"), RangeEnd: []byte("z"),
  698. Limit: 1,
  699. SortOrder: pb.RangeRequest_DESCEND,
  700. SortTarget: pb.RangeRequest_MOD,
  701. },
  702. {
  703. Key: []byte("z"), RangeEnd: []byte("z"),
  704. Limit: 1,
  705. SortOrder: pb.RangeRequest_DESCEND,
  706. SortTarget: pb.RangeRequest_CREATE,
  707. },
  708. },
  709. [][]string{
  710. {"a"},
  711. {"d"},
  712. {"b"},
  713. {"c"},
  714. {},
  715. },
  716. []bool{true, true, true, true, false},
  717. },
  718. }
  719. for i, tt := range tests {
  720. clus := newClusterGRPC(t, &clusterConfig{size: 3})
  721. for _, k := range tt.putKeys {
  722. kvc := pb.NewKVClient(clus.RandConn())
  723. req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
  724. if _, err := kvc.Put(context.TODO(), req); err != nil {
  725. t.Fatalf("#%d: couldn't put key (%v)", i, err)
  726. }
  727. }
  728. for j, req := range tt.reqs {
  729. kvc := pb.NewKVClient(clus.RandConn())
  730. resp, err := kvc.Range(context.TODO(), &req)
  731. if err != nil {
  732. t.Errorf("#%d.%d: Range error: %v", i, j, err)
  733. continue
  734. }
  735. if len(resp.Kvs) != len(tt.wresps[j]) {
  736. t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j]))
  737. continue
  738. }
  739. for k, wKey := range tt.wresps[j] {
  740. respKey := string(resp.Kvs[k].Key)
  741. if respKey != wKey {
  742. t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey)
  743. }
  744. }
  745. if resp.More != tt.wmores[j] {
  746. t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j])
  747. }
  748. wrev := req.Revision
  749. if wrev == 0 {
  750. wrev = int64(len(tt.putKeys) + 1)
  751. }
  752. if resp.Header.Revision != wrev {
  753. t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
  754. }
  755. }
  756. clus.Terminate(t)
  757. }
  758. }