consumer.go

package sarama

import (
	"fmt"
	"sync"
	"time"
)

// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
	Key, Value []byte
	Topic      string
	Partition  int32
	Offset     int64
}

// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
	Topic     string
	Partition int32
	Err       error
}

func (ce ConsumerError) Error() string {
	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}

// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError

func (ce ConsumerErrors) Error() string {
	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}

// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
// scope.
type Consumer interface {
	// ConsumePartition creates a PartitionConsumer on the given topic/partition with the given offset. It will
	// return an error if this Consumer is already consuming on the given topic/partition. Offset can be a
	// literal offset, or OffsetNewest or OffsetOldest.
	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)

	// Close shuts down the consumer. It must be called after all child PartitionConsumers have already been closed.
	Close() error
}
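
// Illustrative usage sketch, not part of the original file: the broker
// address and topic name are hypothetical placeholders, and NewConfig is
// assumed to be the package's default-configuration constructor. It creates
// a consumer, reads ten messages from the newest offset of partition 0, and
// tears things down in the required order (PartitionConsumer first, then
// Consumer).
func exampleConsumePartition() error {
	c, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
	if err != nil {
		return err
	}
	defer c.Close() // deferred calls run LIFO, so this runs after pc.Close()

	pc, err := c.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		return err
	}
	defer pc.Close()

	for i := 0; i < 10; i++ {
		msg := <-pc.Messages()
		fmt.Printf("%s/%d offset %d: %s\n", msg.Topic, msg.Partition, msg.Offset, msg.Value)
	}
	return nil
}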

type consumer struct {
	client    Client
	conf      *Config
	ownClient bool

	lock            sync.Mutex
	children        map[string]map[int32]*partitionConsumer
	brokerConsumers map[*Broker]*brokerConsumer
}

// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
	client, err := NewClient(addrs, config)
	if err != nil {
		return nil, err
	}

	c, err := NewConsumerFromClient(client)
	if err != nil {
		return nil, err
	}
	c.(*consumer).ownClient = true
	return c, nil
}

// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
	// Check that we are not dealing with a closed Client before processing any other arguments
	if client.Closed() {
		return nil, ErrClosedClient
	}

	c := &consumer{
		client:          client,
		conf:            client.Config(),
		children:        make(map[string]map[int32]*partitionConsumer),
		brokerConsumers: make(map[*Broker]*brokerConsumer),
	}

	return c, nil
}

func (c *consumer) Close() error {
	if c.ownClient {
		return c.client.Close()
	}
	return nil
}

func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
	child := &partitionConsumer{
		consumer:  c,
		conf:      c.conf,
		topic:     topic,
		partition: partition,
		messages:  make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
		errors:    make(chan *ConsumerError, c.conf.ChannelBufferSize),
		trigger:   make(chan none, 1),
		dying:     make(chan none),
		fetchSize: c.conf.Consumer.Fetch.Default,
	}

	if err := child.chooseStartingOffset(offset); err != nil {
		return nil, err
	}

	var leader *Broker
	var err error
	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
		return nil, err
	}

	if err := c.addChild(child); err != nil {
		return nil, err
	}

	go withRecover(child.dispatcher)

	child.broker = c.refBrokerConsumer(leader)
	child.broker.input <- child

	return child, nil
}

func (c *consumer) addChild(child *partitionConsumer) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	topicChildren := c.children[child.topic]
	if topicChildren == nil {
		topicChildren = make(map[int32]*partitionConsumer)
		c.children[child.topic] = topicChildren
	}

	if topicChildren[child.partition] != nil {
		return ConfigurationError("That topic/partition is already being consumed")
	}

	topicChildren[child.partition] = child
	return nil
}

func (c *consumer) removeChild(child *partitionConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	delete(c.children[child.topic], child.partition)
}

func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
	c.lock.Lock()
	defer c.lock.Unlock()

	brokerWorker := c.brokerConsumers[broker]
	if brokerWorker == nil {
		brokerWorker = &brokerConsumer{
			consumer:         c,
			broker:           broker,
			input:            make(chan *partitionConsumer),
			newSubscriptions: make(chan []*partitionConsumer),
			wait:             make(chan none),
			subscriptions:    make(map[*partitionConsumer]none),
			refs:             0,
		}
		go withRecover(brokerWorker.subscriptionManager)
		go withRecover(brokerWorker.subscriptionConsumer)
		c.brokerConsumers[broker] = brokerWorker
	}

	brokerWorker.refs++

	return brokerWorker
}

func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	brokerWorker.refs--
	if brokerWorker.refs == 0 {
		close(brokerWorker.input)
		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
			delete(c.brokerConsumers, brokerWorker.broker)
		}
	}
}

func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	delete(c.brokerConsumers, brokerWorker.broker)
}

// PartitionConsumer

// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
// or AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically
// when it passes out of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
type PartitionConsumer interface {
	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately,
	// after which you should wait until the 'messages' and 'errors' channels are drained.
	// It is required to call this function (or Close) before a consumer object passes out of scope,
	// as it will otherwise leak memory. You must call this before calling Close on the underlying
	// client.
	AsyncClose()

	// Close stops the PartitionConsumer from fetching messages. It is required to call this function
	// (or AsyncClose) before a consumer object passes out of scope, as it will otherwise leak memory. You must
	// call this before calling Close on the underlying client.
	Close() error

	// Messages returns the read channel for the messages that are returned by the broker.
	Messages() <-chan *ConsumerMessage

	// Errors returns a read channel of errors that occurred during consuming, if enabled. By default,
	// errors are logged and not returned over this channel. If you want to implement any custom error
	// handling, set your config's Consumer.Return.Errors setting to true, and read from this channel.
	Errors() <-chan *ConsumerError
}
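
// Illustrative sketch, not part of the original file, of the select-based
// error handling described above: it assumes the config's
// Consumer.Return.Errors was set to true, and the shutdown channel is a
// hypothetical stand-in for application-level shutdown signalling.
func examplePartitionConsumerLoop(pc PartitionConsumer, shutdown <-chan struct{}) {
	for {
		select {
		case msg := <-pc.Messages():
			Logger.Printf("consumed %s/%d at offset %d\n", msg.Topic, msg.Partition, msg.Offset)
		case err := <-pc.Errors():
			Logger.Println(err) // custom error handling goes here
		case <-shutdown:
			// Close drains any remaining messages and errors for us
			if err := pc.Close(); err != nil {
				Logger.Println(err)
			}
			return
		}
	}
}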

type partitionConsumer struct {
	consumer  *consumer
	conf      *Config
	topic     string
	partition int32

	broker         *brokerConsumer
	messages       chan *ConsumerMessage
	errors         chan *ConsumerError
	trigger, dying chan none

	fetchSize int32
	offset    int64
}

func (child *partitionConsumer) sendError(err error) {
	cErr := &ConsumerError{
		Topic:     child.topic,
		Partition: child.partition,
		Err:       err,
	}

	if child.conf.Consumer.Return.Errors {
		child.errors <- cErr
	} else {
		Logger.Println(cErr)
	}
}

func (child *partitionConsumer) dispatcher() {
	for range child.trigger {
		select {
		case <-child.dying:
			close(child.trigger)
		case <-time.After(child.conf.Consumer.Retry.Backoff):
			if child.broker != nil {
				child.consumer.unrefBrokerConsumer(child.broker)
				child.broker = nil
			}

			Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
			if err := child.dispatch(); err != nil {
				child.sendError(err)
				child.trigger <- none{}
			}
		}
	}

	if child.broker != nil {
		child.consumer.unrefBrokerConsumer(child.broker)
	}
	child.consumer.removeChild(child)
	close(child.messages)
	close(child.errors)
}

func (child *partitionConsumer) dispatch() error {
	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
		return err
	}

	var leader *Broker
	var err error
	if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
		return err
	}

	child.broker = child.consumer.refBrokerConsumer(leader)
	child.broker.input <- child

	return nil
}

func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
	if err != nil {
		return err
	}
	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
	if err != nil {
		return err
	}

	switch {
	case offset == OffsetNewest:
		child.offset = newestOffset
	case offset == OffsetOldest:
		child.offset = oldestOffset
	case offset >= oldestOffset && offset <= newestOffset:
		child.offset = offset
	default:
		return ErrOffsetOutOfRange
	}

	return nil
}

func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
	return child.messages
}

func (child *partitionConsumer) Errors() <-chan *ConsumerError {
	return child.errors
}

func (child *partitionConsumer) AsyncClose() {
	// this triggers whatever worker owns this child to abandon it and close its trigger channel, which causes
	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
	// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
	// also just close itself)
	close(child.dying)
}

func (child *partitionConsumer) Close() error {
	child.AsyncClose()

	go withRecover(func() {
		for range child.messages {
			// drain
		}
	})

	var errors ConsumerErrors
	for err := range child.errors {
		errors = append(errors, err)
	}

	if len(errors) > 0 {
		return errors
	}
	return nil
}
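
// Illustrative sketch, not part of the original file: since Close drains
// outstanding errors and returns them batched as ConsumerErrors, a caller
// can recover the individual ConsumerError values with a type assertion.
func exampleInspectCloseErrors(pc PartitionConsumer) {
	if err := pc.Close(); err != nil {
		if errs, ok := err.(ConsumerErrors); ok {
			for _, e := range errs {
				Logger.Printf("error on %s/%d during close: %s\n", e.Topic, e.Partition, e.Err)
			}
		}
	}
}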

func (child *partitionConsumer) handleResponse(response *FetchResponse) error {
	block := response.GetBlock(child.topic, child.partition)
	if block == nil {
		return ErrIncompleteResponse
	}

	if block.Err != ErrNoError {
		return block.Err
	}

	if len(block.MsgSet.Messages) == 0 {
		// We got no messages. If we got a partial trailing message then we need to ask for more data.
		// Otherwise we just poll again and wait for one to be produced...
		if block.MsgSet.PartialTrailingMessage {
			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
				// we can't ask for more data, we've hit the configured limit
				child.sendError(ErrMessageTooLarge)
				child.offset++ // skip this one so we can keep processing future messages
			} else {
				child.fetchSize *= 2
				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
					child.fetchSize = child.conf.Consumer.Fetch.Max
				}
			}
		}

		return nil
	}

	// we got messages, reset our fetch size in case it was increased for a previous request
	child.fetchSize = child.conf.Consumer.Fetch.Default

	incomplete := false
	atLeastOne := false
	prelude := true
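
	// A fetch can return a (possibly compressed) message set that starts before
	// the offset we actually asked for; skip over that prelude. Once past it,
	// every message should be at or beyond our current offset. If one is not,
	// or if nothing gets delivered at all, the response is treated as incomplete.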
	for _, msgBlock := range block.MsgSet.Messages {
		for _, msg := range msgBlock.Messages() {
			if prelude && msg.Offset < child.offset {
				continue
			}
			prelude = false

			if msg.Offset >= child.offset {
				atLeastOne = true
				child.messages <- &ConsumerMessage{
					Topic:     child.topic,
					Partition: child.partition,
					Key:       msg.Msg.Key,
					Value:     msg.Msg.Value,
					Offset:    msg.Offset,
				}
				child.offset = msg.Offset + 1
			} else {
				incomplete = true
			}
		}
	}

	if incomplete || !atLeastOne {
		return ErrIncompleteResponse
	}
	return nil
}

// brokerConsumer

type brokerConsumer struct {
	consumer         *consumer
	broker           *Broker
	input            chan *partitionConsumer
	newSubscriptions chan []*partitionConsumer
	wait             chan none
	subscriptions    map[*partitionConsumer]none
	refs             int
}

func (w *brokerConsumer) subscriptionManager() {
	var buffer []*partitionConsumer

	// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
	// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
	// so the main goroutine can block waiting for work if it has none.

	for {
		if len(buffer) > 0 {
			select {
			case event, ok := <-w.input:
				if !ok {
					goto done
				}
				buffer = append(buffer, event)
			case w.newSubscriptions <- buffer:
				buffer = nil
			case w.wait <- none{}:
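				// nothing to do here; the send itself wakes subscriptionConsumer
				// so it can come collect the buffered batch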
			}
		} else {
			select {
			case event, ok := <-w.input:
				if !ok {
					goto done
				}
				buffer = append(buffer, event)
			case w.newSubscriptions <- nil:
			}
		}
	}

done:
	close(w.wait)
	if len(buffer) > 0 {
		w.newSubscriptions <- buffer
	}
	close(w.newSubscriptions)
}

func (w *brokerConsumer) subscriptionConsumer() {
	<-w.wait // wait for our first piece of work

	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
	for newSubscriptions := range w.newSubscriptions {
		w.updateSubscriptionCache(newSubscriptions)

		if len(w.subscriptions) == 0 {
			// We're about to be shut down or we're about to receive more subscriptions.
			// Either way, the signal just hasn't propagated to our goroutine yet.
			<-w.wait
			continue
		}

		response, err := w.fetchNewMessages()
		if err != nil {
			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", w.broker.ID(), err)
			w.abort(err)
			return
		}

		for child := range w.subscriptions {
			if err := child.handleResponse(response); err != nil {
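				// the case ordering here is deliberate: fallthrough chains each
				// step into the next, so an out-of-range offset both shuts the
				// child down and reports the error, and every reported error also
				// triggers the redispatch performed by the final case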
				switch err {
				case ErrOffsetOutOfRange:
					// there's no point in retrying this; it will just fail the
					// same way again, so shut it down and force the user to
					// choose what to do
					child.AsyncClose()
					fallthrough
				default:
					child.sendError(err)
					fallthrough
				case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable:
					// these three are not fatal errors, but do require redispatching
					child.trigger <- none{}
					delete(w.subscriptions, child)
					Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", w.broker.ID(), child.topic, child.partition, err)
				}
			}
		}
	}
}

func (w *brokerConsumer) updateSubscriptionCache(newSubscriptions []*partitionConsumer) {
	// take new subscriptions, and abandon subscriptions that have been closed
	for _, child := range newSubscriptions {
		w.subscriptions[child] = none{}
		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", w.broker.ID(), child.topic, child.partition)
	}

	for child := range w.subscriptions {
		select {
		case <-child.dying:
			close(child.trigger)
			delete(w.subscriptions, child)
			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", w.broker.ID(), child.topic, child.partition)
		default:
		}
	}
}

func (w *brokerConsumer) abort(err error) {
	w.consumer.abandonBrokerConsumer(w)
	_ = w.broker.Close() // we don't care about the error this might return, we already have one

	for child := range w.subscriptions {
		child.sendError(err)
		child.trigger <- none{}
	}

	for newSubscription := range w.newSubscriptions {
		for _, child := range newSubscription {
			child.sendError(err)
			child.trigger <- none{}
		}
	}
}

func (w *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
	request := &FetchRequest{
		MinBytes:    w.consumer.conf.Consumer.Fetch.Min,
		MaxWaitTime: int32(w.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
	}

	for child := range w.subscriptions {
		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
	}

	return w.broker.Fetch(request)
}