watch_keepalive_test.go

// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !cluster_proxy

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
)

// TestWatchKeepAlive ensures that when a watch discovers it cannot talk to a
// blackholed endpoint, the client balancer switches to a healthy one.
// TODO: test server-to-client keepalive ping
func TestWatchKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:                 2,
		GRPCKeepAliveMinTime: 1 * time.Millisecond, // avoid too_many_pings
	})
	defer clus.Terminate(t)

	ccfg := clientv3.Config{
		Endpoints:            []string{clus.Members[0].GRPCAddr()},
		DialTimeout:          3 * time.Second,
		DialKeepAliveTime:    1 * time.Second,
		DialKeepAliveTimeout: 500 * time.Millisecond,
	}

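	// DialKeepAliveTime and DialKeepAliveTimeout drive the client-to-server
	// gRPC keepalive pings, so an unreachable endpoint should be detected
	// within roughly their sum (the pingInterval computed below).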
	// gRPC internal implementation related.
	pingInterval := ccfg.DialKeepAliveTime + ccfg.DialKeepAliveTimeout
	timeout := pingInterval + 2*time.Second // 2s for a slow machine to process the watch and reset connections

	cli, err := clientv3.New(ccfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

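	// WithCreatedNotify makes the server send an initial "created" response
	// once the watch is registered, so the receive below confirms the watch
	// stream is up before any failure is injected.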
	wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify())
	if _, ok := <-wch; !ok {
		t.Fatalf("watch failed on creation")
	}

	// the client balancer can switch to ep[1] when it detects the failure of ep[0]
	cli.SetEndpoints(clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr())

	clus.Members[0].Blackhole()

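	// member 0 now drops all traffic, so its keepalive pings go unanswered;
	// the put below goes through a separate client to member 1 and produces
	// the event the watch must still receive via the healthy endpoint.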
	if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}
	select {
	case <-wch:
	case <-time.After(timeout):
		t.Error("took too long to receive watch events")
	}

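	// reverse the failure: unblock member 0 and blackhole member 1, so the
	// watch has to switch back to the recovered ep[0].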
	clus.Members[0].Unblackhole()
	clus.Members[1].Blackhole()

	// make sure client(0) can connect to member 0 after removing the blackhole
	if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil {
		t.Fatal(err)
	}
	if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil {
		t.Fatal(err)
	}
	select {
	case <-wch:
	case <-time.After(timeout):
		t.Error("took too long to receive watch events")
	}
}