async_producer.go

package sarama

import (
	"encoding/binary"
	"fmt"
	"sync"
	"time"

	"github.com/eapache/go-resiliency/breaker"
	"github.com/eapache/queue"
)

// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {

	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
	// when both the Errors and Successes channels have been closed. When calling
	// AsyncClose, you *must* continue to read from those channels in order to
	// drain the results of any messages in flight.
	AsyncClose()

	// Close shuts down the producer and waits for any buffered messages to be
	// flushed. You must call this function before a producer object passes out of
	// scope, as it may otherwise leak memory. You must call this before calling
	// Close on the underlying client.
	Close() error

	// Input is the input channel for the user to write messages to that they
	// wish to send.
	Input() chan<- *ProducerMessage

	// Successes is the success output channel back to the user when Return.Successes is
	// enabled. If Return.Successes is true, you MUST read from this channel or the
	// Producer will deadlock. It is suggested that you send and read messages
	// together in a single select statement.
	Successes() <-chan *ProducerMessage
	// Errors is the error output channel back to the user. You MUST read from this
	// channel or the Producer will deadlock when the channel is full. Alternatively,
	// you can set Producer.Return.Errors in your config to false, which prevents
	// errors from being returned.
	Errors() <-chan *ProducerError
}
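// The following is a minimal usage sketch, not part of the API above: it shows the
// send-and-read-in-one-select pattern that the interface comments recommend, assuming
// Producer.Return.Successes is enabled (Return.Errors defaults to true). The broker
// address and topic name are placeholders; shutdown and draining are omitted for
// brevity (see AsyncClose above).
//
//	config := NewConfig()
//	config.Producer.Return.Successes = true
//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
//	if err != nil {
//		panic(err)
//	}
//	for i := 0; i < 10; i++ {
//		select {
//		case producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")}:
//		case msg := <-producer.Successes():
//			Logger.Println("delivered to partition", msg.Partition, "at offset", msg.Offset)
//		case err := <-producer.Errors():
//			Logger.Println("failed to deliver:", err)
//		}
//	}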
type asyncProducer struct {
	client    Client
	conf      *Config
	ownClient bool

	errors                    chan *ProducerError
	input, successes, retries chan *ProducerMessage
	inFlight                  sync.WaitGroup

	brokers    map[*Broker]chan<- *ProducerMessage
	brokerRefs map[chan<- *ProducerMessage]int
	brokerLock sync.Mutex
}

// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
	client, err := NewClient(addrs, conf)
	if err != nil {
		return nil, err
	}

	p, err := NewAsyncProducerFromClient(client)
	if err != nil {
		return nil, err
	}
	p.(*asyncProducer).ownClient = true
	return p, nil
}

// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
	// Check that we are not dealing with a closed Client before processing any other arguments
	if client.Closed() {
		return nil, ErrClosedClient
	}

	p := &asyncProducer{
		client:     client,
		conf:       client.Config(),
		errors:     make(chan *ProducerError),
		input:      make(chan *ProducerMessage),
		successes:  make(chan *ProducerMessage),
		retries:    make(chan *ProducerMessage),
		brokers:    make(map[*Broker]chan<- *ProducerMessage),
		brokerRefs: make(map[chan<- *ProducerMessage]int),
	}

	// launch our singleton dispatchers
	go withRecover(p.dispatcher)
	go withRecover(p.retryHandler)

	return p, nil
}

type flagSet int8

const (
	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
	fin                          // final message from partitionProducer to brokerProducer and back
	shutdown                     // start the shutdown process
)

// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
	Topic string // The Kafka topic for this message.
	// The partitioning key for this message. Pre-existing Encoders include
	// StringEncoder and ByteEncoder.
	Key Encoder
	// The actual message to store in Kafka. Pre-existing Encoders include
	// StringEncoder and ByteEncoder.
	Value Encoder

	// The headers are key-value pairs that are transparently passed
	// by Kafka between producers and consumers.
	Headers []RecordHeader
	// This field is used to hold arbitrary data you wish to include, so it
	// will be available when receiving on the Successes and Errors channels.
	// Sarama completely ignores this field and it is only to be used for
	// pass-through data.
	Metadata interface{}

	// The fields below this point are filled in by the producer as the message is processed.

	// Offset is the offset of the message stored on the broker. This is only
	// guaranteed to be defined if the message was successfully delivered and
	// RequiredAcks is not NoResponse.
	Offset int64
	// Partition is the partition that the message was sent to. This is only
	// guaranteed to be defined if the message was successfully delivered.
	Partition int32
	// Timestamp is the timestamp assigned to the message by the broker. This
	// is only guaranteed to be defined if the message was successfully
	// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
	// least version 0.10.0.
	Timestamp time.Time

	retries int
	flags   flagSet
}
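// An illustrative sketch of the Metadata pass-through described above, not part of the
// code in this file: whatever value is attached to a message on Input() comes back
// untouched on the Successes and Errors channels, which makes it useful for correlating
// acknowledgements with application state. The requestID value is a hypothetical example.
//
//	producer.Input() <- &ProducerMessage{
//		Topic:    "my_topic",
//		Value:    StringEncoder("payload"),
//		Metadata: requestID, // any application value; Sarama never inspects it
//	}
//	msg := <-producer.Successes()
//	id := msg.Metadata // the same value that was attached above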
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.

func (m *ProducerMessage) byteSize(version int) int {
	var size int
	if version >= 2 {
		size = maximumRecordOverhead
		for _, h := range m.Headers {
			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
		}
	} else {
		size = producerMessageOverhead
	}
	if m.Key != nil {
		size += m.Key.Length()
	}
	if m.Value != nil {
		size += m.Value.Length()
	}
	return size
}

func (m *ProducerMessage) clear() {
	m.flags = 0
	m.retries = 0
}

// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
	Msg *ProducerMessage
	Err error
}

func (pe ProducerError) Error() string {
	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}

// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError

func (pe ProducerErrors) Error() string {
	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}
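// A small sketch of how Close's error can be inspected, illustrative only: when
// Producer.Return.Errors is enabled, Close (below) drains the Errors channel internally
// and, if any messages failed, returns the collected failures as a ProducerErrors value.
//
//	if err := producer.Close(); err != nil {
//		if errs, ok := err.(ProducerErrors); ok {
//			for _, pe := range errs {
//				Logger.Println("failed:", pe.Msg.Topic, pe.Err)
//			}
//		}
//	}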
func (p *asyncProducer) Errors() <-chan *ProducerError {
	return p.errors
}

func (p *asyncProducer) Successes() <-chan *ProducerMessage {
	return p.successes
}

func (p *asyncProducer) Input() chan<- *ProducerMessage {
	return p.input
}

func (p *asyncProducer) Close() error {
	p.AsyncClose()

	if p.conf.Producer.Return.Successes {
		go withRecover(func() {
			for range p.successes {
			}
		})
	}

	var errors ProducerErrors
	if p.conf.Producer.Return.Errors {
		for event := range p.errors {
			errors = append(errors, event)
		}
	} else {
		<-p.errors
	}

	if len(errors) > 0 {
		return errors
	}
	return nil
}

func (p *asyncProducer) AsyncClose() {
	go withRecover(p.shutdown)
}

// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
	handlers := make(map[string]chan<- *ProducerMessage)
	shuttingDown := false

	for msg := range p.input {
		if msg == nil {
			Logger.Println("Something tried to send a nil message, it was ignored.")
			continue
		}

		if msg.flags&shutdown != 0 {
			shuttingDown = true
			p.inFlight.Done()
			continue
		} else if msg.retries == 0 {
			if shuttingDown {
				// we can't just call returnError here because that decrements the wait group,
				// which hasn't been incremented yet for this message, and shouldn't be
				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
				if p.conf.Producer.Return.Errors {
					p.errors <- pErr
				} else {
					Logger.Println(pErr)
				}
				continue
			}
			p.inFlight.Add(1)
		}

		version := 1
		if p.conf.Version.IsAtLeast(V0_11_0_0) {
			version = 2
		}
		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
			p.returnError(msg, ErrMessageSizeTooLarge)
			continue
		}

		handler := handlers[msg.Topic]
		if handler == nil {
			handler = p.newTopicProducer(msg.Topic)
			handlers[msg.Topic] = handler
		}

		handler <- msg
	}

	for _, handler := range handlers {
		close(handler)
	}
}
// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
	parent *asyncProducer
	topic  string
	input  <-chan *ProducerMessage

	breaker     *breaker.Breaker
	handlers    map[int32]chan<- *ProducerMessage
	partitioner Partitioner
}

func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
	tp := &topicProducer{
		parent:      p,
		topic:       topic,
		input:       input,
		breaker:     breaker.New(3, 1, 10*time.Second),
		handlers:    make(map[int32]chan<- *ProducerMessage),
		partitioner: p.conf.Producer.Partitioner(topic),
	}
	go withRecover(tp.dispatch)
	return input
}

func (tp *topicProducer) dispatch() {
	for msg := range tp.input {
		if msg.retries == 0 {
			if err := tp.partitionMessage(msg); err != nil {
				tp.parent.returnError(msg, err)
				continue
			}
		}

		handler := tp.handlers[msg.Partition]
		if handler == nil {
			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
			tp.handlers[msg.Partition] = handler
		}

		handler <- msg
	}

	for _, handler := range tp.handlers {
		close(handler)
	}
}

func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
	var partitions []int32

	err := tp.breaker.Run(func() (err error) {
		if tp.partitioner.RequiresConsistency() {
			partitions, err = tp.parent.client.Partitions(msg.Topic)
		} else {
			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
		}
		return
	})

	if err != nil {
		return err
	}

	numPartitions := int32(len(partitions))

	if numPartitions == 0 {
		return ErrLeaderNotAvailable
	}

	choice, err := tp.partitioner.Partition(msg, numPartitions)

	if err != nil {
		return err
	} else if choice < 0 || choice >= numPartitions {
		return ErrInvalidPartition
	}

	msg.Partition = partitions[choice]

	return nil
}
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
	parent    *asyncProducer
	topic     string
	partition int32
	input     <-chan *ProducerMessage

	leader  *Broker
	breaker *breaker.Breaker
	output  chan<- *ProducerMessage

	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
	// therefore whether our buffer is complete and safe to flush)
	highWatermark int
	retryState    []partitionRetryState
}

type partitionRetryState struct {
	buf          []*ProducerMessage
	expectChaser bool
}

func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
	pp := &partitionProducer{
		parent:    p,
		topic:     topic,
		partition: partition,
		input:     input,

		breaker:    breaker.New(3, 1, 10*time.Second),
		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
	}
	go withRecover(pp.dispatch)
	return input
}

func (pp *partitionProducer) dispatch() {
	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
	// on the first message
	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
	if pp.leader != nil {
		pp.output = pp.parent.getBrokerProducer(pp.leader)
		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
	}

	for msg := range pp.input {
		if msg.retries > pp.highWatermark {
			// a new, higher, retry level; handle it and then back off
			pp.newHighWatermark(msg.retries)
			time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
		} else if pp.highWatermark > 0 {
			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
			if msg.retries < pp.highWatermark {
				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
				if msg.flags&fin == fin {
					pp.retryState[msg.retries].expectChaser = false
					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
				} else {
					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
				}
				continue
			} else if msg.flags&fin == fin {
				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
				// meaning this retry level is done and we can go down (at least) one level and flush that
				pp.retryState[pp.highWatermark].expectChaser = false
				pp.flushRetryBuffers()
				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
				continue
			}
		}

		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
		// without breaking any of our ordering guarantees

		if pp.output == nil {
			if err := pp.updateLeader(); err != nil {
				pp.parent.returnError(msg, err)
				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
				continue
			}
			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
		}

		pp.output <- msg
	}

	if pp.output != nil {
		pp.parent.unrefBrokerProducer(pp.leader, pp.output)
	}
}

func (pp *partitionProducer) newHighWatermark(hwm int) {
	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
	pp.highWatermark = hwm

	// send off a fin so that we know when everything "in between" has made it
	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
	pp.retryState[pp.highWatermark].expectChaser = true
	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
	pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}

	// a new HWM means that our current broker selection is out of date
	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
	pp.parent.unrefBrokerProducer(pp.leader, pp.output)
	pp.output = nil
}

func (pp *partitionProducer) flushRetryBuffers() {
	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
	for {
		pp.highWatermark--

		if pp.output == nil {
			if err := pp.updateLeader(); err != nil {
				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
				goto flushDone
			}
			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
		}

		for _, msg := range pp.retryState[pp.highWatermark].buf {
			pp.output <- msg
		}

	flushDone:
		pp.retryState[pp.highWatermark].buf = nil
		if pp.retryState[pp.highWatermark].expectChaser {
			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
			break
		} else if pp.highWatermark == 0 {
			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
			break
		}
	}
}

func (pp *partitionProducer) updateLeader() error {
	return pp.breaker.Run(func() (err error) {
		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
			return err
		}

		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
			return err
		}

		pp.output = pp.parent.getBrokerProducer(pp.leader)
		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}

		return nil
	})
}
// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
	var (
		input     = make(chan *ProducerMessage)
		bridge    = make(chan *produceSet)
		responses = make(chan *brokerProducerResponse)
	)

	bp := &brokerProducer{
		parent:         p,
		broker:         broker,
		input:          input,
		output:         bridge,
		responses:      responses,
		buffer:         newProduceSet(p),
		currentRetries: make(map[string]map[int32]error),
	}
	go withRecover(bp.run)

	// minimal bridge to make the network response `select`able
	go withRecover(func() {
		for set := range bridge {
			request := set.buildRequest()

			response, err := broker.Produce(request)

			responses <- &brokerProducerResponse{
				set: set,
				err: err,
				res: response,
			}
		}
		close(responses)
	})

	return input
}

type brokerProducerResponse struct {
	set *produceSet
	err error
	res *ProduceResponse
}

// groups messages together into appropriately-sized batches for sending to the broker
// handles state related to retries etc
type brokerProducer struct {
	parent *asyncProducer
	broker *Broker

	input     <-chan *ProducerMessage
	output    chan<- *produceSet
	responses <-chan *brokerProducerResponse

	buffer     *produceSet
	timer      <-chan time.Time
	timerFired bool

	closing        error
	currentRetries map[string]map[int32]error
}

func (bp *brokerProducer) run() {
	var output chan<- *produceSet
	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())

	for {
		select {
		case msg := <-bp.input:
			if msg == nil {
				bp.shutdown()
				return
			}

			if msg.flags&syn == syn {
				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
					bp.broker.ID(), msg.Topic, msg.Partition)
				if bp.currentRetries[msg.Topic] == nil {
					bp.currentRetries[msg.Topic] = make(map[int32]error)
				}
				bp.currentRetries[msg.Topic][msg.Partition] = nil
				bp.parent.inFlight.Done()
				continue
			}

			if reason := bp.needsRetry(msg); reason != nil {
				bp.parent.retryMessage(msg, reason)

				if bp.closing == nil && msg.flags&fin == fin {
					// we were retrying this partition but we can start processing again
					delete(bp.currentRetries[msg.Topic], msg.Partition)
					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
						bp.broker.ID(), msg.Topic, msg.Partition)
				}

				continue
			}

			if bp.buffer.wouldOverflow(msg) {
				if err := bp.waitForSpace(msg); err != nil {
					bp.parent.retryMessage(msg, err)
					continue
				}
			}

			if err := bp.buffer.add(msg); err != nil {
				bp.parent.returnError(msg, err)
				continue
			}

			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
			}
		case <-bp.timer:
			bp.timerFired = true
		case output <- bp.buffer:
			bp.rollOver()
		case response := <-bp.responses:
			bp.handleResponse(response)
		}

		if bp.timerFired || bp.buffer.readyToFlush() {
			output = bp.output
		} else {
			output = nil
		}
	}
}
func (bp *brokerProducer) shutdown() {
	for !bp.buffer.empty() {
		select {
		case response := <-bp.responses:
			bp.handleResponse(response)
		case bp.output <- bp.buffer:
			bp.rollOver()
		}
	}
	close(bp.output)
	for response := range bp.responses {
		bp.handleResponse(response)
	}

	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}

func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
	if bp.closing != nil {
		return bp.closing
	}

	return bp.currentRetries[msg.Topic][msg.Partition]
}

func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())

	for {
		select {
		case response := <-bp.responses:
			bp.handleResponse(response)
			// handling a response can change our state, so re-check some things
			if reason := bp.needsRetry(msg); reason != nil {
				return reason
			} else if !bp.buffer.wouldOverflow(msg) {
				return nil
			}
		case bp.output <- bp.buffer:
			bp.rollOver()
			return nil
		}
	}
}

func (bp *brokerProducer) rollOver() {
	bp.timer = nil
	bp.timerFired = false
	bp.buffer = newProduceSet(bp.parent)
}

func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
	if response.err != nil {
		bp.handleError(response.set, response.err)
	} else {
		bp.handleSuccess(response.set, response.res)
	}

	if bp.buffer.empty() {
		bp.rollOver() // this can happen if the response invalidated our buffer
	}
}

func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
	// we iterate through the blocks in the request set, not the response, so that we notice
	// if the response is missing a block completely
	sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
		if response == nil {
			// this only happens when RequiredAcks is NoResponse, so we have to assume success
			bp.parent.returnSuccesses(msgs)
			return
		}

		block := response.GetBlock(topic, partition)
		if block == nil {
			bp.parent.returnErrors(msgs, ErrIncompleteResponse)
			return
		}

		switch block.Err {
		// Success
		case ErrNoError:
			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
				for _, msg := range msgs {
					msg.Timestamp = block.Timestamp
				}
			}
			for i, msg := range msgs {
				msg.Offset = block.Offset + int64(i)
			}
			bp.parent.returnSuccesses(msgs)
		// Retriable errors
		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
			Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
				bp.broker.ID(), topic, partition, block.Err)
			bp.currentRetries[topic][partition] = block.Err
			bp.parent.retryMessages(msgs, block.Err)
			bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
		// Other non-retriable errors
		default:
			bp.parent.returnErrors(msgs, block.Err)
		}
	})
}

func (bp *brokerProducer) handleError(sent *produceSet, err error) {
	switch err.(type) {
	case PacketEncodingError:
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.returnErrors(msgs, err)
		})
	default:
		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
		bp.parent.abandonBrokerConnection(bp.broker)
		_ = bp.broker.Close()
		bp.closing = err
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.rollOver()
	}
}
// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
	var msg *ProducerMessage
	buf := queue.New()

	for {
		if buf.Length() == 0 {
			msg = <-p.retries
		} else {
			select {
			case msg = <-p.retries:
			case p.input <- buf.Peek().(*ProducerMessage):
				buf.Remove()
				continue
			}
		}

		if msg == nil {
			return
		}

		buf.Add(msg)
	}
}

// utility functions

func (p *asyncProducer) shutdown() {
	Logger.Println("Producer shutting down.")
	p.inFlight.Add(1)
	p.input <- &ProducerMessage{flags: shutdown}

	p.inFlight.Wait()

	if p.ownClient {
		err := p.client.Close()
		if err != nil {
			Logger.Println("producer/shutdown failed to close the embedded client:", err)
		}
	}

	close(p.input)
	close(p.retries)
	close(p.errors)
	close(p.successes)
}

func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
	msg.clear()
	pErr := &ProducerError{Msg: msg, Err: err}
	if p.conf.Producer.Return.Errors {
		p.errors <- pErr
	} else {
		Logger.Println(pErr)
	}
	p.inFlight.Done()
}

func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.returnError(msg, err)
	}
}

func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
	for _, msg := range batch {
		if p.conf.Producer.Return.Successes {
			msg.clear()
			p.successes <- msg
		}
		p.inFlight.Done()
	}
}

func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
	if msg.retries >= p.conf.Producer.Retry.Max {
		p.returnError(msg, err)
	} else {
		msg.retries++
		p.retries <- msg
	}
}

func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.retryMessage(msg, err)
	}
}

func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	bp := p.brokers[broker]

	if bp == nil {
		bp = p.newBrokerProducer(broker)
		p.brokers[broker] = bp
		p.brokerRefs[bp] = 0
	}

	p.brokerRefs[bp]++

	return bp
}

func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	p.brokerRefs[bp]--
	if p.brokerRefs[bp] == 0 {
		close(bp)
		delete(p.brokerRefs, bp)

		if p.brokers[broker] == bp {
			delete(p.brokers, broker)
		}
	}
}

func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	delete(p.brokers, broker)
}