v3_watch_restore_test.go

// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !cluster_proxy

package integration

import (
	"context"
	"fmt"
	"testing"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// TestV3WatchRestoreSnapshotUnsync tests whether a slow follower can restore
// from a leader snapshot and still notify watchers created at an old revision
// that originally lived in the synced watcher group.
// TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision"
func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{
		Size:                   3,
		SnapshotCount:          10,
		SnapshotCatchUpEntries: 5,
	})
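	// SnapshotCount: 10 makes each member take a raft snapshot every 10
	// applied entries, and SnapshotCatchUpEntries: 5 retains only 5 entries
	// in the raft log after compaction, so a follower that misses more than
	// a handful of entries can only catch up via a snapshot.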
	defer clus.Terminate(t)

	// create a watcher before the partition so it is registered in the
	// synced watcher group
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	wStream, errW := toGRPC(clus.Client(0)).Watch.Watch(ctx)
	if errW != nil {
		t.Fatal(errW)
	}
	if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
	clus.waitLeader(t, clus.Members[1:])
	time.Sleep(2 * time.Second)

	kvc := toGRPC(clus.Client(1)).KV

	// write enough keys to trigger a snapshot from the leader to the
	// partitioned follower
	for i := 0; i < 15; i++ {
		_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
		if err != nil {
			t.Errorf("#%d: couldn't put key (%v)", i, err)
		}
	}
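	// The store starts at revision 1, so 15 puts leave it at revision 16;
	// the watcher created with StartRevision 5 should therefore see 12
	// events (revisions 5 through 16) once the follower recovers.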
	// trigger a snapshot send from the leader to this slow follower,
	// which then calls the watchable store Restore
	clus.Members[0].RecoverPartition(t, clus.Members[1:]...)
	clus.WaitLeader(t)
	time.Sleep(2 * time.Second)

	// the slow follower now applies the leader snapshot and should still
	// notify old-revision watchers from the unsynced group; make sure the
	// restore operation correctly moves watchers between the synced and
	// unsynced watcher groups
	errc := make(chan error)
	go func() {
		cresp, cerr := wStream.Recv()
		if cerr != nil {
			errc <- cerr
			return
		}
		// from start revision 5 to latest revision 16
		if len(cresp.Events) != 12 {
			errc <- fmt.Errorf("expected 12 events, got %+v", cresp.Events)
			return
		}
		errc <- nil
	}()

	select {
	case <-time.After(10 * time.Second):
		t.Fatal("took too long to receive events from restored watcher")
	case err := <-errc:
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
	}
}