peer.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rafthttp

import (
	"context"
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver/stats"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"

	"golang.org/x/time/rate"
)
const (
	// ConnReadTimeout and ConnWriteTimeout are the i/o timeouts set on each connection the rafthttp pkg creates.
	// A 5-second timeout is good enough for recycling bad connections. Otherwise we would have to wait
	// for TCP keepalive to detect a bad connection, which takes on the order of minutes.
	// For long-term streaming connections, the rafthttp pkg sends an application-level linkHeartbeatMessage
	// to keep the connection alive.
	// For short-term pipeline connections, the connection MUST be killed to avoid it being
	// put back into the http pkg connection pool.
	ConnReadTimeout  = 5 * time.Second
	ConnWriteTimeout = 5 * time.Second

	recvBufSize = 4096

	// maxPendingProposals holds the proposals during one leader election process.
	// Generally one leader election takes at most 1 sec. It should have
	// 0-2 election conflicts, and each one takes 0.5 sec.
	// We assume the number of concurrent proposers is smaller than 4096.
	// One client blocks on its proposal for at least 1 sec, so 4096 is enough
	// to hold all proposals.
	maxPendingProposals = 4096

	streamAppV2 = "streamMsgAppV2"
	streamMsg   = "streamMsg"
	pipelineMsg = "pipeline"
	sendSnap    = "sendMsgSnap"
)
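
// Illustrative sketch (not part of the original file): these constants are the
// per-connection i/o timeouts described above, and a transport layer would
// typically apply them as read/write deadlines on each connection it dials.
// The dialPeer helper below is a hypothetical name used only for illustration,
// not the actual rafthttp dialing code.
//
//	func dialPeer(addr string) (net.Conn, error) {
//		conn, err := net.DialTimeout("tcp", addr, ConnReadTimeout)
//		if err != nil {
//			return nil, err
//		}
//		// Bound each read/write so a dead peer is detected within seconds
//		// instead of waiting minutes for TCP keepalive to fail.
//		conn.SetReadDeadline(time.Now().Add(ConnReadTimeout))
//		conn.SetWriteDeadline(time.Now().Add(ConnWriteTimeout))
//		return conn, nil
//	}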
// Peer is the local handle for sending messages to a remote raft peer.
type Peer interface {
	// send sends the message to the remote peer. The function is non-blocking
	// and makes no promise that the message will be received by the remote.
	// When it fails to send the message out, it reports the status to the
	// underlying raft.
	send(m raftpb.Message)

	// sendSnap sends the merged snapshot message to the remote peer. Its behavior
	// is similar to send.
	sendSnap(m snap.Message)

	// update updates the urls of the remote peer.
	update(urls types.URLs)

	// attachOutgoingConn attaches the outgoing connection to the peer for
	// stream usage. After the call, ownership of the outgoing connection
	// is handed over to the peer. The peer will close the connection
	// when it is no longer used.
	attachOutgoingConn(conn *outgoingConn)

	// activeSince returns the time that the connection with the
	// peer became active.
	activeSince() time.Time

	// stop performs any necessary finalization and terminates the peer
	// gracefully.
	stop()
}
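
// Illustrative sketch (not part of the original file): a transport that keeps
// a map of peers could forward outbound raft messages through this interface.
// The forwardMessages helper and the peers map below are assumptions for
// illustration only, not the actual etcd Transport API.
//
//	func forwardMessages(peers map[types.ID]Peer, msgs []raftpb.Message) {
//		for _, m := range msgs {
//			if p, ok := peers[types.ID(m.To)]; ok {
//				// send is non-blocking; it drops the message and reports
//				// unreachability to raft when the sending buffer is full.
//				p.send(m)
//			}
//		}
//	}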
// peer is the representative of a remote raft node. The local raft node sends
// messages to the remote through peer.
// Each peer has two underlying mechanisms to send out a message: stream and
// pipeline.
// A stream is a receiver-initialized long-polling connection, which
// is always open to transfer messages. Besides the general stream, peer also has
// an optimized stream for sending msgApp since msgApp accounts for a large part
// of all messages. Only the raft leader uses the optimized stream to send msgApp
// to the remote follower node.
// A pipeline is a series of http clients that send http requests to the remote.
// It is only used when the stream has not been established.
type peer struct {
	// id of the remote raft peer node
	id types.ID
	r  Raft

	status *peerStatus
	picker *urlPicker

	msgAppV2Writer *streamWriter
	writer         *streamWriter
	pipeline       *pipeline
	snapSender     *snapshotSender // snapshot sender to send v3 snapshot messages
	msgAppV2Reader *streamReader
	msgAppReader   *streamReader

	recvc chan raftpb.Message // incoming raft messages to be processed by the local raft
	propc chan raftpb.Message // incoming proposal messages, handled separately from recvc

	mu     sync.Mutex
	paused bool // when paused, outgoing messages are dropped and the stream readers stop reading

	cancel context.CancelFunc // cancel pending work in goroutines created by peer.
	stopc  chan struct{}
}
func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
	plog.Infof("starting peer %s...", peerID)
	defer plog.Infof("started peer %s", peerID)

	status := newPeerStatus(peerID)
	picker := newURLPicker(urls)
	errorc := transport.ErrorC
	r := transport.Raft
	pipeline := &pipeline{
		peerID:        peerID,
		tr:            transport,
		picker:        picker,
		status:        status,
		followerStats: fs,
		raft:          r,
		errorc:        errorc,
	}
	pipeline.start()

	p := &peer{
		id:             peerID,
		r:              r,
		status:         status,
		picker:         picker,
		msgAppV2Writer: startStreamWriter(peerID, status, fs, r),
		writer:         startStreamWriter(peerID, status, fs, r),
		pipeline:       pipeline,
		snapSender:     newSnapshotSender(transport, picker, peerID, status),
		recvc:          make(chan raftpb.Message, recvBufSize),
		propc:          make(chan raftpb.Message, maxPendingProposals),
		stopc:          make(chan struct{}),
	}

	ctx, cancel := context.WithCancel(context.Background())
	p.cancel = cancel
	go func() {
		for {
			select {
			case mm := <-p.recvc:
				if err := r.Process(ctx, mm); err != nil {
					plog.Warningf("failed to process raft message (%v)", err)
				}
			case <-p.stopc:
				return
			}
		}
	}()

	// r.Process might block while processing a proposal when there is no leader.
	// Thus propc must be handled in a goroutine separate from recvc so it does
	// not block the processing of other raft messages.
	go func() {
		for {
			select {
			case mm := <-p.propc:
				if err := r.Process(ctx, mm); err != nil {
					plog.Warningf("failed to process raft message (%v)", err)
				}
			case <-p.stopc:
				return
			}
		}
	}()

	p.msgAppV2Reader = &streamReader{
		peerID: peerID,
		typ:    streamTypeMsgAppV2,
		tr:     transport,
		picker: picker,
		status: status,
		recvc:  p.recvc,
		propc:  p.propc,
		rl:     rate.NewLimiter(transport.DialRetryFrequency, 1),
	}
	p.msgAppReader = &streamReader{
		peerID: peerID,
		typ:    streamTypeMessage,
		tr:     transport,
		picker: picker,
		status: status,
		recvc:  p.recvc,
		propc:  p.propc,
		rl:     rate.NewLimiter(transport.DialRetryFrequency, 1),
	}

	p.msgAppV2Reader.start()
	p.msgAppReader.start()

	return p
}
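
// Illustrative sketch (not part of the original file): startPeer is expected
// to be invoked by the transport layer when a remote member is added, roughly
// along these lines. The addPeer helper below is a hypothetical name for
// illustration only, not the exact etcd Transport method.
//
//	func addPeer(t *Transport, id types.ID, us []string) (*peer, error) {
//		urls, err := types.NewURLs(us)
//		if err != nil {
//			return nil, err
//		}
//		// A zero-value FollowerStats stands in here for the transport's
//		// real per-follower statistics.
//		return startPeer(t, urls, id, &stats.FollowerStats{}), nil
//	}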
func (p *peer) send(m raftpb.Message) {
	p.mu.Lock()
	paused := p.paused
	p.mu.Unlock()

	if paused {
		return
	}

	writec, name := p.pick(m)
	select {
	case writec <- m:
	default:
		p.r.ReportUnreachable(m.To)
		if isMsgSnap(m) {
			p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
		}
		if p.status.isActive() {
			plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
		}
		plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
		sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
	}
}
func (p *peer) sendSnap(m snap.Message) {
	go p.snapSender.send(m)
}

func (p *peer) update(urls types.URLs) {
	p.picker.update(urls)
}

func (p *peer) attachOutgoingConn(conn *outgoingConn) {
	var ok bool
	switch conn.t {
	case streamTypeMsgAppV2:
		ok = p.msgAppV2Writer.attach(conn)
	case streamTypeMessage:
		ok = p.writer.attach(conn)
	default:
		plog.Panicf("unhandled stream type %s", conn.t)
	}
	if !ok {
		conn.Close()
	}
}
func (p *peer) activeSince() time.Time { return p.status.activeSince() }

// Pause pauses the peer. The peer will simply drop all incoming
// messages without returning an error.
func (p *peer) Pause() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.paused = true
	p.msgAppReader.pause()
	p.msgAppV2Reader.pause()
}

// Resume resumes a paused peer.
func (p *peer) Resume() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.paused = false
	p.msgAppReader.resume()
	p.msgAppV2Reader.resume()
}
func (p *peer) stop() {
	plog.Infof("stopping peer %s...", p.id)
	defer plog.Infof("stopped peer %s", p.id)

	close(p.stopc)
	p.cancel()
	p.msgAppV2Writer.stop()
	p.writer.stop()
	p.pipeline.stop()
	p.snapSender.stop()
	p.msgAppV2Reader.stop()
	p.msgAppReader.stop()
}
// pick picks a chan for sending the given message. It returns the picked chan
// and the chan's string name.
func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
	var ok bool
	// Considering that MsgSnap may have a big size, e.g., 1GB, and would block
	// the stream for a long time, only use one of the N pipelines to send MsgSnap.
	if isMsgSnap(m) {
		return p.pipeline.msgc, pipelineMsg
	} else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
		return writec, streamAppV2
	} else if writec, ok = p.writer.writec(); ok {
		return writec, streamMsg
	}
	return p.pipeline.msgc, pipelineMsg
}

func isMsgApp(m raftpb.Message) bool  { return m.Type == raftpb.MsgApp }

func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }