v3_grpc_test.go

// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"bytes"
	"fmt"
	"math/rand"
	"reflect"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
	"github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/grpc"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/storage/storagepb"
)

type clusterV3 struct {
	*cluster
	conns []*grpc.ClientConn
}

// newClusterGRPC returns a launched cluster with a grpc client connection
// for each cluster member.
func newClusterGRPC(t *testing.T, cfg *clusterConfig) *clusterV3 {
	cfg.useV3 = true
	cfg.useGRPC = true
	clus := &clusterV3{cluster: NewClusterByConfig(t, cfg)}
	for _, m := range clus.Members {
		conn, err := NewGRPCClient(m)
		if err != nil {
			t.Fatal(err)
		}
		clus.conns = append(clus.conns, conn)
	}
	clus.Launch(t)
	return clus
}

func (c *clusterV3) Terminate(t *testing.T) {
	for _, conn := range c.conns {
		if err := conn.Close(); err != nil {
			t.Error(err)
		}
	}
	c.cluster.Terminate(t)
}

func (c *clusterV3) RandConn() *grpc.ClientConn {
	return c.conns[rand.Intn(len(c.conns))]
}

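// The tests below all follow the same basic pattern with these helpers:
// launch a cluster, pick a member connection at random, issue RPCs against
// it, and terminate the cluster. A minimal sketch (mirroring
// TestV3PutOverwrite below):
//
//	clus := newClusterGRPC(t, &clusterConfig{size: 3})
//	defer clus.Terminate(t)
//	kvc := pb.NewKVClient(clus.RandConn())
//	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
//		t.Fatal(err)
//	}
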
// TestV3PutOverwrite puts a key with the v3 api to a random cluster member,
// overwrites it, then checks that the change was applied.
func TestV3PutOverwrite(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())
	key := []byte("foo")
	reqput := &pb.PutRequest{Key: key, Value: []byte("bar")}

	respput, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	// overwrite
	reqput.Value = []byte("baz")
	respput2, err := kvc.Put(context.TODO(), reqput)
	if err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if respput2.Header.Revision <= respput.Header.Revision {
		t.Fatalf("expected newer revision on overwrite, got %v <= %v",
			respput2.Header.Revision, respput.Header.Revision)
	}

	reqrange := &pb.RangeRequest{Key: key}
	resprange, err := kvc.Range(context.TODO(), reqrange)
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(resprange.Kvs) != 1 {
		t.Fatalf("expected 1 key, got %v", len(resprange.Kvs))
	}

	kv := resprange.Kvs[0]
	if kv.ModRevision <= kv.CreateRevision {
		t.Errorf("expected modRev > createRev, got %d <= %d",
			kv.ModRevision, kv.CreateRevision)
	}
	if !reflect.DeepEqual(reqput.Value, kv.Value) {
		t.Errorf("expected value %v, got %v", reqput.Value, kv.Value)
	}
}

// TestV3DeleteRange tests various edge cases in the DeleteRange API.
func TestV3DeleteRange(t *testing.T) {
	tests := []struct {
		keySet  []string
		begin   string
		end     string
		wantSet [][]byte
	}{
		// delete middle
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fop",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// no delete
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "foo/",
			[][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")},
		},
		// delete first
		{
			[]string{"foo", "foo/abc", "fop"},
			"fo", "fop",
			[][]byte{[]byte("fop")},
		},
		// delete tail
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/", "fos",
			[][]byte{[]byte("foo")},
		},
		// delete exact
		{
			[]string{"foo", "foo/abc", "fop"},
			"foo/abc", "",
			[][]byte{[]byte("foo"), []byte("fop")},
		},
		// delete none, [x,x)
		{
			[]string{"foo"},
			"foo", "foo",
			[][]byte{[]byte("foo")},
		},
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})
		kvc := pb.NewKVClient(clus.RandConn())

		ks := tt.keySet
		for j := range ks {
			reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}}
			_, err := kvc.Put(context.TODO(), reqput)
			if err != nil {
				t.Fatalf("couldn't put key (%v)", err)
			}
		}

		dreq := &pb.DeleteRangeRequest{
			Key:      []byte(tt.begin),
			RangeEnd: []byte(tt.end)}
		dresp, err := kvc.DeleteRange(context.TODO(), dreq)
		if err != nil {
			t.Fatalf("couldn't delete range on test %d (%v)", i, err)
		}

		rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("couldn't get range on test %v (%v)", i, err)
		}
		if dresp.Header.Revision != rresp.Header.Revision {
			t.Errorf("expected revision %v, got %v",
				dresp.Header.Revision, rresp.Header.Revision)
		}

		keys := [][]byte{}
		for j := range rresp.Kvs {
			keys = append(keys, rresp.Kvs[j].Key)
		}
		if !reflect.DeepEqual(tt.wantSet, keys) {
			t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys)
		}

		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}

// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
func TestV3WatchFromCurrentRevision(t *testing.T) {
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest
		wresps       []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("helloworld")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("helloworld")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
			},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo")}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 1},
					Created: true,
				},
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},

		// TODO: watch and receive multiple-events from synced (need Txn)
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})

		wAPI := pb.NewWatchClient(clus.RandConn())
		wStream, err := wAPI.Watch(context.TODO())
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}

		if err := wStream.Send(tt.watchRequest); err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}

		go func() {
			for _, k := range tt.putKeys {
				kvc := pb.NewKVClient(clus.RandConn())
				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
				if _, err := kvc.Put(context.TODO(), req); err != nil {
					t.Fatalf("#%d: couldn't put key (%v)", i, err)
				}
			}
		}()

		var createdWatchId int64
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}
			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}
			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.Created {
				createdWatchId = resp.WatchId
			}
			if resp.WatchId != createdWatchId {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
			}
			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}

		rok, nr := WaitResponse(wStream, 1*time.Second)
		if !rok {
			t.Errorf("unexpected pb.WatchResponse received: %+v", nr)
		}

		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}

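// The *Synced/*Unsynced test pairs below drive the same helper with a
// StartRevision of 0 (watch from the current revision, served from the
// synced watcher map) or 1 (watch from an old revision, served from the
// unsynced map until it catches up).
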
// TestV3WatchCancelSynced tests cancellation of a watch on the synced watcher map.
func TestV3WatchCancelSynced(t *testing.T) {
	testV3WatchCancel(t, 0)
}

// TestV3WatchCancelUnsynced tests cancellation of a watch on the unsynced watcher map.
func TestV3WatchCancelUnsynced(t *testing.T) {
	testV3WatchCancel(t, 1)
}

func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse received: %+v", nr)
	}

	clus.Terminate(t)
}

func TestV3WatchMultipleWatchersSynced(t *testing.T) {
	testV3WatchMultipleWatchers(t, 0)
}

func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
	testV3WatchMultipleWatchers(t, 1)
}

// testV3WatchMultipleWatchers tests multiple watchers on the same key
// plus one watcher with a matching prefix. It first puts a key that
// matches all watchers, then another key that matches only the prefix
// watcher, and checks that each watcher receives the expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	kvc := pb.NewKVClient(clus.RandConn())

	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	watchKeyN := 4
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}
		} else {
			wreq = &pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("fo"), StartRevision: startRev}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}

	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}

	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}

	// now Recv should block because there are no more events coming
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse received: %+v", nr)
	}

	clus.Terminate(t)
}

func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
	testV3WatchMultipleEventsTxn(t, 0)
}

func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
	testV3WatchMultipleEventsTxn(t, 1)
}

// testV3WatchMultipleEventsTxn tests Watch APIs when a watcher receives
// multiple events from a single Txn.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, wErr := wAPI.Watch(context.TODO())
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: startRev}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.RequestPut = &pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}
		txn.Success = append(txn.Success, ru)
	}
	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}

	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	sort.Sort(eventsSortByKey(events))

	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}

	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse received: %+v", nr)
	}

	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}

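// eventsSortByKey implements sort.Interface so that events gathered across
// multiple watch responses can be ordered by key before comparison.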
type eventsSortByKey []*storagepb.Event

func (evs eventsSortByKey) Len() int           { return len(evs) }
func (evs eventsSortByKey) Swap(i, j int)      { evs[i], evs[j] = evs[j], evs[i] }
func (evs eventsSortByKey) Less(i, j int) bool { return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 }

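// TestV3WatchMultipleEventsPutUnsynced puts keys both before and after
// creating an unsynced watcher (StartRevision: 1) and checks that the
// watcher catches up on all four resulting events.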
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	wAPI := pb.NewWatchClient(clus.RandConn())
	wStream, wErr := wAPI.Watch(context.TODO())
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Prefix: []byte("foo"), StartRevision: 1}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	allWevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
		},
	}

	events := []*storagepb.Event{}
	for len(events) < 4 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
		// If the PUT requests have already been committed, the first receive
		// returns multiple events at once; otherwise each receive returns a
		// single event. On a fast disk (e.g. an SSD), all 4 events should
		// arrive in one response.
	}
	if !reflect.DeepEqual(events, allWevents) {
		t.Errorf("events got = %+v, want = %+v", events, allWevents)
	}

	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse received: %+v", nr)
	}
}

func TestV3WatchMultipleStreamsSynced(t *testing.T) {
	testV3WatchMultipleStreams(t, 0)
}

func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
	testV3WatchMultipleStreams(t, 1)
}

// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})

	wAPI := pb.NewWatchClient(clus.RandConn())
	kvc := pb.NewKVClient(clus.RandConn())

	streams := make([]pb.Watch_WatchClient, 5)
	for i := range streams {
		wStream, errW := wAPI.Watch(context.TODO())
		if errW != nil {
			t.Fatalf("wAPI.Watch error: %v", errW)
		}
		if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: startRev}}); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
		streams[i] = wStream
	}

	for _, wStream := range streams {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
	}

	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}

	var wg sync.WaitGroup
	wg.Add(len(streams))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	for i := range streams {
		go func(i int) {
			defer wg.Done()
			wStream := streams[i]
			wresp, err := wStream.Recv()
			if err != nil {
				t.Fatalf("wStream.Recv error: %v", err)
			}
			if wresp.WatchId != 0 {
				t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
			}
			if !reflect.DeepEqual(wresp.Events, wevents) {
				t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
			}
			// now Recv should block because there are no more events coming
			rok, nr := WaitResponse(wStream, 1*time.Second)
			if !rok {
				t.Errorf("unexpected pb.WatchResponse received: %+v", nr)
			}
		}(i)
	}
	wg.Wait()

	clus.Terminate(t)
}

// WaitResponse waits on the given stream for the given duration.
// If no response arrives within the timeout, it closes the stream's send
// side and returns (true, nil). If a response does arrive, it returns
// (false, resp) with the received response.
func WaitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		return false, nr
	case <-time.After(timeout):
	}
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		return false, rv
	}
	return true, nil
}

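// TestV3RangeRequest tests Range requests against the cases in the table
// below: single keys, multi-key ranges, reads at older revisions, limits,
// and sorted results.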
func TestV3RangeRequest(t *testing.T) {
	tests := []struct {
		putKeys []string
		reqs    []pb.RangeRequest
		wresps  [][]string
		wmores  []bool
	}{
		// single key
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// exists
				{Key: []byte("foo")},
				// doesn't exist
				{Key: []byte("baz")},
			},
			[][]string{
				{"foo"},
				{},
			},
			[]bool{false, false},
		},
		// multi-key
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				// all in range
				{Key: []byte("a"), RangeEnd: []byte("z")},
				// [b, d)
				{Key: []byte("b"), RangeEnd: []byte("d")},
				// out of range
				{Key: []byte("f"), RangeEnd: []byte("z")},
				// [c,c) = empty
				{Key: []byte("c"), RangeEnd: []byte("c")},
				// [d, b) = empty
				{Key: []byte("d"), RangeEnd: []byte("b")},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{"b", "c"},
				{},
				{},
				{},
			},
			[]bool{false, false, false, false, false},
		},
		// revision
		{
			[]string{"a", "b", "c", "d", "e"},
			[]pb.RangeRequest{
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2},
				{Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3},
			},
			[][]string{
				{"a", "b", "c", "d", "e"},
				{},
				{"a"},
				{"a", "b"},
			},
			[]bool{false, false, false, false},
		},
		// limit
		{
			[]string{"foo", "bar"},
			[]pb.RangeRequest{
				// more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1},
				// no more
				{Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2},
			},
			[][]string{
				{"bar"},
				{"bar", "foo"},
			},
			[]bool{true, false},
		},
		// sort
		{
			[]string{"b", "a", "c", "d", "c"},
			[]pb.RangeRequest{
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_KEY,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_ASCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
				{
					Key: []byte("a"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_MOD,
				},
				{
					Key: []byte("z"), RangeEnd: []byte("z"),
					Limit:      1,
					SortOrder:  pb.RangeRequest_DESCEND,
					SortTarget: pb.RangeRequest_CREATE,
				},
			},
			[][]string{
				{"a"},
				{"d"},
				{"b"},
				{"c"},
				{},
			},
			[]bool{true, true, true, true, false},
		},
	}

	for i, tt := range tests {
		clus := newClusterGRPC(t, &clusterConfig{size: 3})
		for _, k := range tt.putKeys {
			kvc := pb.NewKVClient(clus.RandConn())
			req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				t.Fatalf("#%d: couldn't put key (%v)", i, err)
			}
		}

		for j, req := range tt.reqs {
			kvc := pb.NewKVClient(clus.RandConn())
			resp, err := kvc.Range(context.TODO(), &req)
			if err != nil {
				t.Errorf("#%d.%d: Range error: %v", i, j, err)
				continue
			}
			if len(resp.Kvs) != len(tt.wresps[j]) {
				t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d", i, j, len(resp.Kvs), len(tt.wresps[j]))
				continue
			}
			for k, wKey := range tt.wresps[j] {
				respKey := string(resp.Kvs[k].Key)
				if respKey != wKey {
					t.Errorf("#%d.%d: key[%d]. got = %v, want = %v", i, j, k, respKey, wKey)
				}
			}
			if resp.More != tt.wmores[j] {
				t.Errorf("#%d.%d: bad more. got = %v, want = %v", i, j, resp.More, tt.wmores[j])
			}

			wrev := req.Revision
			if wrev == 0 {
				wrev = int64(len(tt.putKeys) + 1)
			}
			if resp.Header.Revision != wrev {
				t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev)
			}
		}

		clus.Terminate(t)
	}
}