// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"bytes"
	"fmt"
	"math/rand"
	"reflect"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
	"github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/grpc"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/storage/storagepb"
)

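// clusterV3 is a test cluster that also keeps one gRPC client connection
// per member so tests can issue v3 API calls directly.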
type clusterV3 struct {
	*cluster
	conns []*grpc.ClientConn
}

// newClusterGRPC returns a launched cluster with a grpc client connection
// for each cluster member.
func newClusterGRPC(t *testing.T, cfg *clusterConfig) *clusterV3 {
	cfg.useV3 = true
	cfg.useGRPC = true
	clus := &clusterV3{cluster: NewClusterByConfig(t, cfg)}
	for _, m := range clus.Members {
		conn, err := NewGRPCClient(m)
		if err != nil {
			t.Fatal(err)
		}
		clus.conns = append(clus.conns, conn)
	}
	clus.Launch(t)
	return clus
}

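// Terminate closes every client connection, then shuts down the underlying cluster.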
func (c *clusterV3) Terminate(t *testing.T) {
	for _, conn := range c.conns {
		if err := conn.Close(); err != nil {
			t.Error(err)
		}
	}
	c.cluster.Terminate(t)
}

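// RandConn returns the client connection of a randomly chosen cluster member.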
func (c *clusterV3) RandConn() *grpc.ClientConn {
	return c.conns[rand.Intn(len(c.conns))]
}

// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())
	key := []byte("foo")

	reqput := &pb.PutRequest{Key: key, Value: []byte("bar")}
	respput, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	// overwrite
	reqput.Value = []byte("baz")
	respput2, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if respput2.Header.Revision <= respput.Header.Revision {
		t.Fatalf("expected newer revision on overwrite, got %v <= %v",
			respput2.Header.Revision, respput.Header.Revision)
	}

	reqrange := &pb.RangeRequest{Key: key}
	resprange, err := kvc.Range(context.TODO(), reqrange)
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(resprange.Kvs) != 1 {
		t.Fatalf("expected 1 key, got %v", len(resprange.Kvs))
	}

	kv := resprange.Kvs[0]
	if kv.ModRevision <= kv.CreateRevision {
		t.Errorf("expected modRev > createRev, got %d <= %d",
			kv.ModRevision, kv.CreateRevision)
	}
	if !reflect.DeepEqual(reqput.Value, kv.Value) {
		t.Errorf("expected value %v, got %v", reqput.Value, kv.Value)
	}
}

// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
	tests := []struct {
		keySet  []string
		begin   string
		end     string
		wantSet [][]byte
	}{
		// delete middle
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// no delete
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "foo/",
			[][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
		},
		// delete first
		{
			[]string{"foo", "foo/abc", "fop"},
			"fo", "fop",
			[][]byte{[]byte("fop")},
		},
		// delete tail
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fos",
			[][]byte{[]byte("foo")},
		},
		// delete exact
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/abc", "",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// delete none, [x,x)
		{
			[]string{"foo"},
			"foo", "foo",
			[][]byte{[]byte("foo")},
		},
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})
		kvc := pb.NewKVClient(clus.RandConn())

		ks := tt.keySet
		for j := range ks {
			reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
			_, err := kvc.Put(context.TODO(), reqput)
			if err != nil {
				t.Fatalf("couldn't put key (%v)", err)
			}
		}

		dreq := &pb.DeleteRangeRequest{
			Key:      []byte(tt.begin),
			RangeEnd: []byte(tt.end)}
		dresp, err := kvc.DeleteRange(context.TODO(), dreq)
		if err != nil {
			t.Fatalf("couldn't delete range on test %d (%v)", i, err)
		}

		rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("couldn't get range on test %v (%v)", i, err)
		}
		if dresp.Header.Revision != rresp.Header.Revision {
			t.Errorf("expected revision %v, got %v",
				dresp.Header.Revision, rresp.Header.Revision)
		}

		keys := [][]byte{}
		for j := range rresp.Kvs {
			keys = append(keys, rresp.Kvs[j].Key)
		}
		if !reflect.DeepEqual(tt.wantSet, keys) {
			t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
		}

		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}

// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
func TestV3WatchFromCurrentRevision(t *testing.T) {
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest
		wresps       []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("helloworld")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("helloworld")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},

		// TODO: watch and receive multiple-events from synced (need Txn)
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})

		wAPI := pb.NewWatchClient(clus.RandConn())
		wStream, err := wAPI.Watch(context.TODO())
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}
		if err := wStream.Send(tt.watchRequest); err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}

		go func() {
			for _, k := range tt.putKeys {
				kvc := pb.NewKVClient(clus.RandConn())
				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
				if _, err := kvc.Put(context.TODO(), req); err != nil {
					// Fatal may only be called from the test goroutine; report and stop.
					t.Errorf("#%d: couldn't put key (%v)", i, err)
					return
				}
			}
		}()

		var createdWatchId int64
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}
			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}
			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.Created {
				createdWatchId = resp.WatchId
			}
			if resp.WatchId != createdWatchId {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
			}
			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}

		rok, nr := WaitResponse(wStream, 1*time.Second)
		if !rok {
			t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
		}

		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}

// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map.
func TestV3WatchCancelSynced(t *testing.T) {
	testV3WatchCancel(t, 0)
}

// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map.
func TestV3WatchCancelUnsynced(t *testing.T) {
	testV3WatchCancel(t, 1)
}

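// testV3WatchCancel creates a watcher on "foo" starting at startRev,
// cancels it, then verifies that a later put is not delivered on the
// canceled watch stream.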
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// the watch was canceled, so the put above should not be delivered
	// and Recv should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	clus.Terminate(t)
}

// TestV3WatchMultiple tests multiple watchers on the same key
// and one watcher with a matching prefix. It first puts the key
// that matches all watchers, then another key that matches only
// one watcher, and checks that the expected events are received.
func TestV3WatchMultiple(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	kvc := pb.NewKVClient(clus.RandConn())

	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	watchKeyN := 4
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}
		} else {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("fo")}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}

	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}

	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}

	// now Recv should block because there are no more events coming
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	clus.Terminate(t)
}

// TestV3WatchMultipleEventsFromCurrentRevision tests Watch APIs from current revision
// in the case where a single transaction produces multiple events.
func TestV3WatchMultipleEventsFromCurrentRevision(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, wErr := wAPI.Watch(context.TODO())
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.RequestPut = &pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}
		txn.Success = append(txn.Success, ru)
	}
	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}

	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	sort.Sort(eventsSortByKey(events))

	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}

	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}

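// eventsSortByKey implements sort.Interface to order events by key so that
// received event slices can be compared deterministically.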
type eventsSortByKey []*storagepb.Event

func (evs eventsSortByKey) Len() int           { return len(evs) }
func (evs eventsSortByKey) Swap(i, j int)      { evs[i], evs[j] = evs[j], evs[i] }
func (evs eventsSortByKey) Less(i, j int) bool { return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 }

// TestV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
func TestV3WatchMultipleStreams(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	kvc := pb.NewKVClient(clus.RandConn())

	streams := make([]pb.Watch_WatchClient, 5)
	for i := range streams {
		wStream, errW := wAPI.Watch(context.TODO())
		if errW != nil {
			t.Fatalf("wAPI.Watch error: %v", errW)
		}
		if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
		streams[i] = wStream
	}

	for _, wStream := range streams {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	var wg sync.WaitGroup
	wg.Add(len(streams))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	for i := range streams {
		go func(i int) {
			defer wg.Done()
			wStream := streams[i]
			wresp, err := wStream.Recv()
			if err != nil {
				// Fatal may only be called from the test goroutine; report and stop.
				t.Errorf("wStream.Recv error: %v", err)
				return
			}
			if wresp.WatchId != 0 {
				t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
			}
			if !reflect.DeepEqual(wresp.Events, wevents) {
				t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
			}
			// now Recv should block because there are no more events coming
			rok, nr := WaitResponse(wStream, 1*time.Second)
			if !rok {
				t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
			}
		}(i)
	}
	wg.Wait()

	clus.Terminate(t)
}

// WaitResponse waits on the given stream for the given duration.
// If no response arrives before the timeout, it closes the send side of the
// WatchClient stream and returns (true, nil). Otherwise it returns
// (false, resp) with the response that was received.
func WaitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		return false, nr
	case <-time.After(timeout):
	}
	// timed out; close the send side and check whether a late response still arrives
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		return false, rv
	}
	return true, nil
}

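// TestV3RangeRequest tests the Range API with single keys, key ranges,
// revisions, limits, and sort options.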
func TestV3RangeRequest(t *testing.T) {
	tests := []struct {
		putKeys []string
		reqs    []pb.RangeRequest
		wresps  [][]string
		wmores  []bool
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},
			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{"b", "c"},
				{},
				{},
				{},
			},
			[]bool{false, false, false, false, false},
		},
		// revision
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},
			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				{
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},
			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
			},
			[]bool{true, true, true, true, false},
		},
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})
		for _, k := range tt.putKeys {
			kvc := pb.NewKVClient(clus.RandConn())
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}

		for j, req := range tt.reqs {
			kvc := pb.NewKVClient(clus.RandConn())
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v", i, j, resp.More, tt.wmores[j])
			}

			wrev := req.Revision
			if wrev == 0 {
				wrev = int64(len(tt.putKeys) + 1)
			}
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}
		clus.Terminate(t)
	}
}