interceptor.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"

	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)
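
// maxNoLeaderCnt is the number of consecutive election-timeout checks without
// a known leader after which monitorLeader cancels the registered streams.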
const (
	maxNoLeaderCnt = 3
)
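
// streamsMap tracks the server streams that were opened with the
// leader-required metadata, so monitorLeader can cancel them when the member
// stays leaderless.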
type streamsMap struct {
	mu      sync.Mutex
	streams map[grpc.ServerStream]struct{}
}
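
// newUnaryInterceptor returns an interceptor that rejects unary RPCs with
// ErrGRPCNotCapable until the v3rpc capability is enabled and, for requests
// carrying the leader-required metadata, with ErrGRPCNoLeader while the member
// has no leader. Everything else is forwarded to the go-grpc-prometheus
// interceptor for metrics.
//
// A minimal wiring sketch (the actual server construction lives elsewhere in
// this package; grpcServer below is illustrative only):
//
//	grpcServer := grpc.NewServer(
//		grpc.UnaryInterceptor(newUnaryInterceptor(s)),
//		grpc.StreamInterceptor(newStreamInterceptor(s)),
//	)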
func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
			return nil, rpctypes.ErrGRPCNotCapable
		}
		md, ok := metadata.FromContext(ctx)
		if ok {
			if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
				if s.Leader() == types.ID(raft.None) {
					return nil, rpctypes.ErrGRPCNoLeader
				}
			}
		}
		return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
	}
}
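
// newStreamInterceptor applies the same capability and leader checks to
// streaming RPCs. Streams opened with the leader-required metadata are wrapped
// in a cancelable context and registered with the streamsMap returned by
// monitorLeader, so they can be torn down if the member loses its leader.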
func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
	smap := monitorLeader(s)
	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
			return rpctypes.ErrGRPCNotCapable
		}
		md, ok := metadata.FromContext(ss.Context())
		if ok {
			if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
				if s.Leader() == types.ID(raft.None) {
					return rpctypes.ErrGRPCNoLeader
				}
				cctx, cancel := context.WithCancel(ss.Context())
				ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
				smap.mu.Lock()
				smap.streams[ss] = struct{}{}
				smap.mu.Unlock()
				defer func() {
					smap.mu.Lock()
					delete(smap.streams, ss)
					smap.mu.Unlock()
					cancel()
				}()
			}
		}
		return prometheus.StreamServerInterceptor(srv, ss, info, handler)
	}
}
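
// serverStreamWithCtx wraps a grpc.ServerStream with a cancelable context so
// that the stream can be canceled from outside the RPC handler (see
// monitorLeader).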
type serverStreamWithCtx struct {
	grpc.ServerStream
	ctx    context.Context
	cancel *context.CancelFunc
}

func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
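
// monitorLeader starts a background goroutine that checks for a leader once
// per election timeout and, after maxNoLeaderCnt consecutive leaderless
// checks, cancels every registered stream and waits for each to finish.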
func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
	smap := &streamsMap{
		streams: make(map[grpc.ServerStream]struct{}),
	}
	go func() {
		election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
		noLeaderCnt := 0
		for {
			select {
			case <-s.StopNotify():
				return
			case <-time.After(election):
				if s.Leader() == types.ID(raft.None) {
					noLeaderCnt++
				} else {
					noLeaderCnt = 0
				}
				// We are more conservative on canceling existing streams. Reconnecting streams
				// cost much more than just rejecting new requests. So we wait until the member
				// cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
				if noLeaderCnt >= maxNoLeaderCnt {
					smap.mu.Lock()
					for ss := range smap.streams {
						if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
							(*ssWithCtx.cancel)()
							<-ss.Context().Done()
						}
					}
					smap.streams = make(map[grpc.ServerStream]struct{})
					smap.mu.Unlock()
				}
			}
		}
	}()
	return smap
}