@@ -180,7 +180,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
 	for i, tt := range tests {
 		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
 
-		wAPI := clus.RandClient().Watch
+		wAPI := toGRPC(clus.RandClient()).Watch
 		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 		defer cancel()
 		wStream, err := wAPI.Watch(ctx)
@@ -212,7 +212,7 @@ func TestV3WatchFromCurrentRevision(t *testing.T) {
 		// asynchronously create keys
 		go func() {
 			for _, k := range tt.putKeys {
-				kvc := clus.RandClient().KV
+				kvc := toGRPC(clus.RandClient()).KV
 				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
 				if _, err := kvc.Put(context.TODO(), req); err != nil {
 					t.Fatalf("#%d: couldn't put key (%v)", i, err)
@@ -273,7 +273,7 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
-	wStream, errW := clus.RandClient().Watch.Watch(ctx)
+	wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
 	if errW != nil {
 		t.Fatalf("wAPI.Watch error: %v", errW)
 	}
@@ -308,7 +308,7 @@ func testV3WatchCancel(t *testing.T, startRev int64) {
 		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
 	}
 
-	kvc := clus.RandClient().KV
+	kvc := toGRPC(clus.RandClient()).KV
 	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
 		t.Errorf("couldn't put key (%v)", err)
 	}
@@ -331,7 +331,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
-	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
+	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
 	if wErr != nil {
 		t.Fatalf("wAPI.Watch error: %v", wErr)
 	}
@@ -341,7 +341,7 @@ func TestV3WatchCurrentPutOverlap(t *testing.T) {
 	// first revision already allocated as empty revision
 	for i := 1; i < nrRevisions; i++ {
 		go func() {
-			kvc := clus.RandClient().KV
+			kvc := toGRPC(clus.RandClient()).KV
 			req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
 			if _, err := kvc.Put(context.TODO(), req); err != nil {
 				t.Fatalf("couldn't put key (%v)", err)
@@ -418,11 +418,11 @@ func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
 // one watcher to test if it receives expected events.
 func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
 	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
-	kvc := clus.RandClient().KV
+	kvc := toGRPC(clus.RandClient()).KV
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
-	wStream, errW := clus.RandClient().Watch.Watch(ctx)
+	wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
 	if errW != nil {
 		t.Fatalf("wAPI.Watch error: %v", errW)
 	}
@@ -523,7 +523,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
-	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
+	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
 	if wErr != nil {
 		t.Fatalf("wAPI.Watch error: %v", wErr)
 	}
@@ -535,7 +535,7 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
 		t.Fatalf("wStream.Send error: %v", err)
 	}
 
-	kvc := clus.RandClient().KV
+	kvc := toGRPC(clus.RandClient()).KV
 	txn := pb.TxnRequest{}
 	for i := 0; i < 3; i++ {
 		ru := &pb.RequestUnion{}
@@ -605,7 +605,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
 	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
-	kvc := clus.RandClient().KV
+	kvc := toGRPC(clus.RandClient()).KV
 
 	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
 		t.Fatalf("couldn't put key (%v)", err)
@@ -616,7 +616,7 @@ func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
-	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
+	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
 	if wErr != nil {
 		t.Fatalf("wAPI.Watch error: %v", wErr)
 	}
|
@@ -692,8 +692,8 @@ func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
|
|
|
// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
|
|
// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams.
|
|
|
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
|
|
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
|
|
|
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
|
|
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
|
|
|
- wAPI := clus.RandClient().Watch
|
|
|
|
|
- kvc := clus.RandClient().KV
|
|
|
|
|
|
|
+ wAPI := toGRPC(clus.RandClient()).Watch
|
|
|
|
|
+ kvc := toGRPC(clus.RandClient()).KV
|
|
|
|
|
|
|
|
streams := make([]pb.Watch_WatchClient, 5)
|
|
streams := make([]pb.Watch_WatchClient, 5)
|
|
|
for i := range streams {
|
|
for i := range streams {
|
|
@@ -792,7 +792,7 @@ func TestV3WatchInvalidFutureRevision(t *testing.T) {
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
-	wStream, wErr := clus.RandClient().Watch.Watch(ctx)
+	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
 	if wErr != nil {
 		t.Fatalf("wAPI.Watch error: %v", wErr)
 	}
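
The change above is mechanical: every direct service-client field access (clus.RandClient().KV, clus.RandClient().Watch) is routed through a toGRPC helper instead. As a minimal sketch of what such a helper could look like (an assumption, not the repository's exact code: the grpcAPI bundle, its field set, and the coreos-era import paths are illustrative; clientv3.Client is assumed to expose its underlying *grpc.ClientConn via ActiveConnection()):

	package integration

	import (
		"github.com/coreos/etcd/clientv3"
		pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	)

	// grpcAPI bundles the raw gRPC service clients used by the v3 tests.
	// Sketch only: the real helper likely carries more services.
	type grpcAPI struct {
		KV    pb.KVClient
		Watch pb.WatchClient
	}

	// toGRPC adapts a high-level clientv3 client into raw etcdserverpb
	// service clients over the same connection, so the tests exercise the
	// wire protocol directly rather than the client library's wrappers.
	func toGRPC(c *clientv3.Client) grpcAPI {
		conn := c.ActiveConnection() // assumed accessor for the underlying *grpc.ClientConn
		return grpcAPI{
			KV:    pb.NewKVClient(conn),
			Watch: pb.NewWatchClient(conn),
		}
	}

With a helper shaped like this, the diff is a find-and-replace at every call site, which is why each hunk touches only the client-construction lines and leaves the surrounding test logic untouched.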