// peer.go
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package rafthttp
  15. import (
  16. "context"
  17. "sync"
  18. "time"
  19. "go.etcd.io/etcd/etcdserver/api/snap"
  20. stats "go.etcd.io/etcd/etcdserver/api/v2stats"
  21. "go.etcd.io/etcd/pkg/types"
  22. "go.etcd.io/etcd/raft"
  23. "go.etcd.io/etcd/raft/raftpb"
  24. "go.uber.org/zap"
  25. "golang.org/x/time/rate"
  26. )
const (
	// ConnReadTimeout and ConnWriteTimeout are the i/o timeout set on each connection rafthttp pkg creates.
	// A 5 seconds timeout is good enough for recycling bad connections. Or we have to wait for
	// tcp keepalive failing to detect a bad connection, which is at minutes level.
	// For long term streaming connections, rafthttp pkg sends application level linkHeartbeatMessage
	// to keep the connection alive.
	// For short term pipeline connections, the connection MUST be killed to avoid it being
	// put back to http pkg connection pool.
	ConnReadTimeout  = 5 * time.Second
	ConnWriteTimeout = 5 * time.Second

	// recvBufSize is the capacity of peer.recvc, the channel buffering
	// inbound raft messages before the raft state machine processes them.
	recvBufSize = 4096

	// maxPendingProposals holds the proposals during one leader election process.
	// Generally one leader election takes at most 1 sec. It should have
	// 0-2 election conflicts, and each one takes 0.5 sec.
	// We assume the number of concurrent proposers is smaller than 4096.
	// One client blocks on its proposal for at least 1 sec, so 4096 is enough
	// to hold all proposals.
	maxPendingProposals = 4096

	// Names of the message-sending mechanisms, used for logging/labeling
	// which channel a message was routed through (see peer.pick).
	streamAppV2 = "streamMsgAppV2"
	streamMsg   = "streamMsg"
	pipelineMsg = "pipeline"
	sendSnap    = "sendMsgSnap"
)
// Peer is the interface a remote raft node presents to the local transport.
type Peer interface {
	// send sends the message to the remote peer. The function is non-blocking
	// and has no promise that the message will be received by the remote.
	// When it fails to send message out, it will report the status to underlying
	// raft.
	send(m raftpb.Message)

	// sendSnap sends the merged snapshot message to the remote peer. Its behavior
	// is similar to send.
	sendSnap(m snap.Message)

	// update updates the urls of remote peer.
	update(urls types.URLs)

	// attachOutgoingConn attaches the outgoing connection to the peer for
	// stream usage. After the call, the ownership of the outgoing
	// connection hands over to the peer. The peer will close the connection
	// when it is no longer used.
	attachOutgoingConn(conn *outgoingConn)

	// activeSince returns the time that the connection with the
	// peer becomes active.
	activeSince() time.Time

	// stop performs any necessary finalization and terminates the peer
	// elegantly.
	stop()
}
// peer is the representative of a remote raft node. Local raft node sends
// messages to the remote through peer.
// Each peer has two underlying mechanisms to send out a message: stream and
// pipeline.
// A stream is a receiver initialized long-polling connection, which
// is always open to transfer messages. Besides general stream, peer also has
// a optimized stream for sending msgApp since msgApp accounts for large part
// of all messages. Only raft leader uses the optimized stream to send msgApp
// to the remote follower node.
// A pipeline is a series of http clients that send http requests to the remote.
// It is only used when the stream has not been established.
type peer struct {
	lg *zap.Logger

	// localID is the id of the local raft node.
	localID types.ID
	// id of the remote raft peer node
	id types.ID

	// r is the local raft state machine that processes received messages.
	r Raft

	status *peerStatus
	// picker selects which of the remote peer's URLs to dial.
	picker *urlPicker

	msgAppV2Writer *streamWriter
	writer         *streamWriter
	pipeline       *pipeline
	snapSender     *snapshotSender // snapshot sender to send v3 snapshot messages
	msgAppV2Reader *streamReader
	msgAppReader   *streamReader

	// recvc buffers general inbound raft messages; propc buffers proposals,
	// which may block in r.Process and are therefore drained separately.
	recvc chan raftpb.Message
	propc chan raftpb.Message

	// mu guards paused.
	mu     sync.Mutex
	paused bool

	cancel context.CancelFunc // cancel pending works in go routine created by peer.
	stopc  chan struct{}
}
  105. func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
  106. if t.Logger != nil {
  107. t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String()))
  108. } else {
  109. plog.Infof("starting peer %s...", peerID)
  110. }
  111. defer func() {
  112. if t.Logger != nil {
  113. t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String()))
  114. } else {
  115. plog.Infof("started peer %s", peerID)
  116. }
  117. }()
  118. status := newPeerStatus(t.Logger, t.ID, peerID)
  119. picker := newURLPicker(urls)
  120. errorc := t.ErrorC
  121. r := t.Raft
  122. pipeline := &pipeline{
  123. peerID: peerID,
  124. tr: t,
  125. picker: picker,
  126. status: status,
  127. followerStats: fs,
  128. raft: r,
  129. errorc: errorc,
  130. }
  131. pipeline.start()
  132. p := &peer{
  133. lg: t.Logger,
  134. localID: t.ID,
  135. id: peerID,
  136. r: r,
  137. status: status,
  138. picker: picker,
  139. msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
  140. writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
  141. pipeline: pipeline,
  142. snapSender: newSnapshotSender(t, picker, peerID, status),
  143. recvc: make(chan raftpb.Message, recvBufSize),
  144. propc: make(chan raftpb.Message, maxPendingProposals),
  145. stopc: make(chan struct{}),
  146. }
  147. ctx, cancel := context.WithCancel(context.Background())
  148. p.cancel = cancel
  149. go func() {
  150. for {
  151. select {
  152. case mm := <-p.recvc:
  153. if err := r.Process(ctx, mm); err != nil {
  154. if t.Logger != nil {
  155. t.Logger.Warn("failed to process Raft message", zap.Error(err))
  156. } else {
  157. plog.Warningf("failed to process raft message (%v)", err)
  158. }
  159. }
  160. case <-p.stopc:
  161. return
  162. }
  163. }
  164. }()
  165. // r.Process might block for processing proposal when there is no leader.
  166. // Thus propc must be put into a separate routine with recvc to avoid blocking
  167. // processing other raft messages.
  168. go func() {
  169. for {
  170. select {
  171. case mm := <-p.propc:
  172. if err := r.Process(ctx, mm); err != nil {
  173. plog.Warningf("failed to process raft message (%v)", err)
  174. }
  175. case <-p.stopc:
  176. return
  177. }
  178. }
  179. }()
  180. p.msgAppV2Reader = &streamReader{
  181. lg: t.Logger,
  182. peerID: peerID,
  183. typ: streamTypeMsgAppV2,
  184. tr: t,
  185. picker: picker,
  186. status: status,
  187. recvc: p.recvc,
  188. propc: p.propc,
  189. rl: rate.NewLimiter(t.DialRetryFrequency, 1),
  190. }
  191. p.msgAppReader = &streamReader{
  192. lg: t.Logger,
  193. peerID: peerID,
  194. typ: streamTypeMessage,
  195. tr: t,
  196. picker: picker,
  197. status: status,
  198. recvc: p.recvc,
  199. propc: p.propc,
  200. rl: rate.NewLimiter(t.DialRetryFrequency, 1),
  201. }
  202. p.msgAppV2Reader.start()
  203. p.msgAppReader.start()
  204. return p
  205. }
  206. func (p *peer) send(m raftpb.Message) {
  207. p.mu.Lock()
  208. paused := p.paused
  209. p.mu.Unlock()
  210. if paused {
  211. return
  212. }
  213. writec, name := p.pick(m)
  214. select {
  215. case writec <- m:
  216. default:
  217. p.r.ReportUnreachable(m.To)
  218. if isMsgSnap(m) {
  219. p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
  220. }
  221. if p.status.isActive() {
  222. if p.lg != nil {
  223. p.lg.Warn(
  224. "dropped internal Raft message since sending buffer is full (overloaded network)",
  225. zap.String("message-type", m.Type.String()),
  226. zap.String("local-member-id", p.localID.String()),
  227. zap.String("from", types.ID(m.From).String()),
  228. zap.String("remote-peer-id", p.id.String()),
  229. zap.Bool("remote-peer-active", p.status.isActive()),
  230. )
  231. } else {
  232. plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
  233. }
  234. } else {
  235. if p.lg != nil {
  236. p.lg.Warn(
  237. "dropped internal Raft message since sending buffer is full (overloaded network)",
  238. zap.String("message-type", m.Type.String()),
  239. zap.String("local-member-id", p.localID.String()),
  240. zap.String("from", types.ID(m.From).String()),
  241. zap.String("remote-peer-id", p.id.String()),
  242. zap.Bool("remote-peer-active", p.status.isActive()),
  243. )
  244. } else {
  245. plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
  246. }
  247. }
  248. sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
  249. }
  250. }
// sendSnap transfers the merged v3 snapshot message to the remote peer in a
// new goroutine, so a large snapshot cannot block the caller.
func (p *peer) sendSnap(m snap.Message) {
	go p.snapSender.send(m)
}
// update replaces the set of URLs used to reach the remote peer.
func (p *peer) update(urls types.URLs) {
	p.picker.update(urls)
}
  257. func (p *peer) attachOutgoingConn(conn *outgoingConn) {
  258. var ok bool
  259. switch conn.t {
  260. case streamTypeMsgAppV2:
  261. ok = p.msgAppV2Writer.attach(conn)
  262. case streamTypeMessage:
  263. ok = p.writer.attach(conn)
  264. default:
  265. if p.lg != nil {
  266. p.lg.Panic("unknown stream type", zap.String("type", conn.t.String()))
  267. } else {
  268. plog.Panicf("unhandled stream type %s", conn.t)
  269. }
  270. }
  271. if !ok {
  272. conn.Close()
  273. }
  274. }
// activeSince returns the time at which the connection with the peer became
// active (zero if it has never been active).
func (p *peer) activeSince() time.Time { return p.status.activeSince() }
  276. // Pause pauses the peer. The peer will simply drops all incoming
  277. // messages without returning an error.
  278. func (p *peer) Pause() {
  279. p.mu.Lock()
  280. defer p.mu.Unlock()
  281. p.paused = true
  282. p.msgAppReader.pause()
  283. p.msgAppV2Reader.pause()
  284. }
  285. // Resume resumes a paused peer.
  286. func (p *peer) Resume() {
  287. p.mu.Lock()
  288. defer p.mu.Unlock()
  289. p.paused = false
  290. p.msgAppReader.resume()
  291. p.msgAppV2Reader.resume()
  292. }
// stop shuts the peer down: it signals both message-draining goroutines via
// stopc, cancels any in-flight r.Process calls, then stops the writers,
// pipeline, snapshot sender, and readers. The order is deliberate — stopc
// and cancel first so nothing blocks while the senders/receivers wind down.
func (p *peer) stop() {
	if p.lg != nil {
		p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String()))
	} else {
		plog.Infof("stopping peer %s...", p.id)
	}

	defer func() {
		if p.lg != nil {
			p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String()))
		} else {
			plog.Infof("stopped peer %s", p.id)
		}
	}()

	close(p.stopc)
	p.cancel()
	p.msgAppV2Writer.stop()
	p.writer.stop()
	p.pipeline.stop()
	p.snapSender.stop()
	p.msgAppV2Reader.stop()
	p.msgAppReader.stop()
}
  315. // pick picks a chan for sending the given message. The picked chan and the picked chan
  316. // string name are returned.
  317. func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
  318. var ok bool
  319. // Considering MsgSnap may have a big size, e.g., 1G, and will block
  320. // stream for a long time, only use one of the N pipelines to send MsgSnap.
  321. if isMsgSnap(m) {
  322. return p.pipeline.msgc, pipelineMsg
  323. } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
  324. return writec, streamAppV2
  325. } else if writec, ok = p.writer.writec(); ok {
  326. return writec, streamMsg
  327. }
  328. return p.pipeline.msgc, pipelineMsg
  329. }
// isMsgApp reports whether m is an append-entries message.
func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp }

// isMsgSnap reports whether m is a snapshot message.
func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }