// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"context"
	"fmt"
	"testing"
	"time"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

// TestV3WatchRestoreSnapshotUnsync tests whether a slow follower can restore
// from a leader snapshot and still notify watchers from an old revision
// that were originally created in the synced watcher group.
// TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision"
func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {
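	// with SnapshotCount: 10 the leader takes a raft snapshot after every 10
	// applied entries, and SnapshotCatchUpEntries: 5 keeps only 5 entries in
	// the raft log afterwards, so a follower that falls further behind must
	// catch up from a snapshot instead of ordinary log replication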
	clus := NewClusterV3(t, &ClusterConfig{
		Size:                   3,
		SnapshotCount:          10,
		SnapshotCatchUpEntries: 5,
	})
	defer clus.Terminate(t)

	// spawn a watcher before the partition, and make sure it is registered
	// in the synced watcher group
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, errW := toGRPC(clus.Client(0)).Watch.Watch(ctx)
	if errW != nil {
		t.Fatal(errW)
	}
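
	// watch "foo" from revision 5; the store is still at revision 1, so the
	// requested start revision lies in the future and the watcher begins its
	// life in the synced watcher group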
	if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
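
	// cut member 0 (the member serving the watch stream) off from the other
	// two members so it stops receiving new raft entries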
	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
	clus.waitLeader(t, clus.Members[1:])
	time.Sleep(2 * time.Second)

	kvc := toGRPC(clus.Client(1)).KV

	// write enough keys to trigger a snapshot from the leader to the
	// partitioned follower: 15 puts exceed SnapshotCount (10), so the leader
	// snapshots and compacts its raft log past what the follower holds
	for i := 0; i < 15; i++ {
		_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
		if err != nil {
			t.Errorf("#%d: couldn't put key (%v)", i, err)
		}
	}

	// trigger snapshot send from leader to this slow follower
	// which then calls watchable store Restore
	clus.Members[0].RecoverPartition(t, clus.Members[1:]...)
	clus.WaitLeader(t)
	time.Sleep(2 * time.Second)

	// the slow follower now applies the leader snapshot and should be able to
	// notify on old-revision watchers from the unsynced group; make sure the
	// restore operation correctly moves watchers between the synced and
	// unsynced watcher groups
	errc := make(chan error)
	go func() {
		cresp, cerr := wStream.Recv()
		if cerr != nil {
			errc <- cerr
			return
		}
		// from start revision 5 to the latest revision 16: the 15 puts moved
		// the store from revision 1 to 16, so the watcher should receive the
		// 12 events for revisions 5 through 16
		if len(cresp.Events) != 12 {
			errc <- fmt.Errorf("expected 12 events, got %+v", cresp.Events)
			return
		}
		errc <- nil
	}()

	select {
	case <-time.After(10 * time.Second):
		t.Fatal("took too long to receive events from restored watcher")
	case err := <-errc:
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
	}
}