consumer_test.go

package sarama

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"testing"
	"time"
)
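
// TestConsumerOffsetManual starts a partition consumer at an explicit offset
// (1234) and checks that the messages it delivers carry the expected
// sequential offsets.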
func TestConsumerOffsetManual(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	for i := 0; i <= 10; i++ {
		fetchResponse := new(FetchResponse)
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+1234))
		leader.Returns(fetchResponse)
	}

	master, err := NewConsumer([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 1234)
	if err != nil {
		t.Fatal(err)
	}
	seedBroker.Close()

	for i := 0; i < 10; i++ {
		select {
		case message := <-consumer.Messages():
			if message.Offset != int64(i+1234) {
				t.Error("Incorrect message offset!")
			}
		case err := <-consumer.Errors():
			t.Error(err)
		}
	}

	safeClose(t, consumer)
	safeClose(t, master)
	leader.Close()
}
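
// TestConsumerLatestOffset starts a partition consumer at OffsetNewest and
// checks that the starting offset is fetched from the broker and then
// advanced past the single message that gets delivered.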
func TestConsumerLatestOffset(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	offsetResponse := new(OffsetResponse)
	offsetResponse.AddTopicPartition("my_topic", 0, 0x010101)
	leader.Returns(offsetResponse)

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), 0x010101)
	leader.Returns(fetchResponse)

	master, err := NewConsumer([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	seedBroker.Close()

	consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}
	leader.Close()
	safeClose(t, consumer)
	safeClose(t, master)

	// we deliver one message, so it should be one higher than we return in the OffsetResponse
	if consumer.(*partitionConsumer).offset != 0x010102 {
		t.Error("Latest offset not fetched correctly:", consumer.(*partitionConsumer).offset)
	}
}

func TestConsumerFunnyOffsets(t *testing.T) {
	// For topics that are compressed and/or compacted (different things!) we have to be
	// able to handle receiving offsets that are non-sequential (though still strictly
	// increasing) and possibly starting prior to the actual value we requested.
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(1))
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(3))
	leader.Returns(fetchResponse)

	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(5))
	leader.Returns(fetchResponse)

	master, err := NewConsumer([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 2)
	if err != nil {
		t.Fatal(err)
	}

	message := <-consumer.Messages()
	if message.Offset != 3 {
		t.Error("Incorrect message offset!")
	}

	leader.Close()
	seedBroker.Close()
	safeClose(t, consumer)
	safeClose(t, master)
}
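
// TestConsumerRebalancingMultiplePartitions moves leadership of two
// partitions back and forth between two brokers while consuming, and checks
// that exactly ten messages arrive in order on each partition despite the
// rebalancing.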
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := newMockBroker(t, 1)
	leader0 := newMockBroker(t, 2)
	leader1 := newMockBroker(t, 3)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
	metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	// launch test goroutines
	config := NewConfig()
	config.Consumer.Retry.Backoff = 0
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// we expect to end up (eventually) consuming exactly ten messages on each partition
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", int32(i), 0)
		if err != nil {
			t.Error(err)
		}

		go func(c PartitionConsumer) {
			for err := range c.Errors() {
				t.Error(err)
			}
		}(consumer)

		wg.Add(1)
		go func(partition int32, c PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(int32(i), consumer)
	}

	// leader0 provides the first four messages on partition 0
	fetchResponse := new(FetchResponse)
	for i := 0; i < 4; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
	}
	leader0.Returns(fetchResponse)

	// leader0 says it is no longer the leader of partition 0
	fetchResponse = new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
	leader0.Returns(fetchResponse)

	// metadata assigns both partitions to leader1
	metadataResponse = new(MetadataResponse)
	metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)
	time.Sleep(50 * time.Millisecond) // dumbest way to force a particular response ordering

	// leader1 provides five messages on partition 1
	fetchResponse = new(FetchResponse)
	for i := 0; i < 5; i++ {
		fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
	}
	leader1.Returns(fetchResponse)

	// leader1 provides three more messages on both partitions
	fetchResponse = new(FetchResponse)
	for i := 0; i < 3; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+4))
		fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+5))
	}
	leader1.Returns(fetchResponse)

	// leader1 provides three more messages on partition 0, and says it is no longer the leader of partition 1
	fetchResponse = new(FetchResponse)
	for i := 0; i < 3; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+7))
	}
	fetchResponse.AddError("my_topic", 1, ErrNotLeaderForPartition)
	leader1.Returns(fetchResponse)

	// metadata assigns partition 0 to leader1 and partition 1 to leader0
	metadataResponse = new(MetadataResponse)
	metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader0.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)
	time.Sleep(50 * time.Millisecond) // dumbest way to force a particular response ordering

	// leader0 provides two messages on partition 1
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(8))
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(9))
	leader0.Returns(fetchResponse)

	// leader0 provides the last message on partition 1
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
	leader0.Returns(fetchResponse)

	// leader1 provides the last message on partition 0
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
	leader1.Returns(fetchResponse)

	wg.Wait()
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
	safeClose(t, master)
}
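
// TestConsumerInterleavedClose opens two partition consumers on the same
// leader and closes them in the reverse of the order they were created
// (currently skipped pending bug #325).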
func TestConsumerInterleavedClose(t *testing.T) {
	t.Skip("Enable once bug #325 is fixed.")

	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	config := NewConfig()
	config.ChannelBufferSize = 0
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(0))
	leader.Returns(fetchResponse)

	c1, err := master.ConsumePartition("my_topic", 1, 0)
	if err != nil {
		t.Fatal(err)
	}

	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(0))
	leader.Returns(fetchResponse)

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	leader.Close()
	seedBroker.Close()
}
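
// TestConsumerBounceWithReferenceOpen restarts the leader broker on the same
// address while two partition consumers still hold references to it, and
// checks that both consumers recover and continue receiving messages.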
func TestConsumerBounceWithReferenceOpen(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)
	leaderAddr := leader.Addr()

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	config := NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Retry.Backoff = 0
	config.ChannelBufferSize = 0
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 0)
	if err != nil {
		t.Fatal(err)
	}

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(0))
	fetchResponse.AddError("my_topic", 1, ErrNoError)
	leader.Returns(fetchResponse)
	<-c0.Messages()

	fetchResponse = new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrNoError)
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(0))
	leader.Returns(fetchResponse)
	<-c1.Messages()

	leader.Close()
	leader = newMockBrokerAddr(t, 2, leaderAddr)

	// unblock one of the two (it doesn't matter which)
	select {
	case <-c0.Errors():
	case <-c1.Errors():
	}

	// send it back to the same broker
	seedBroker.Returns(metadataResponse)

	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(1))
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(1))
	leader.Returns(fetchResponse)
	time.Sleep(5 * time.Millisecond)

	// unblock the other one
	select {
	case <-c0.Errors():
	case <-c1.Errors():
	}

	// send it back to the same broker
	seedBroker.Returns(metadataResponse)

	select {
	case <-c0.Messages():
	case <-c1.Messages():
	}

	leader.Close()
	seedBroker.Close()

	wg := sync.WaitGroup{}
	wg.Add(2)
	go func() {
		_ = c0.Close()
		wg.Done()
	}()
	go func() {
		_ = c1.Close()
		wg.Done()
	}()
	wg.Wait()
	safeClose(t, master)
}

// This example has the simplest use case of the consumer. It simply
// iterates over the messages channel using a for/range loop. Because
// a consumer never stops unless requested, a signal handler is registered
// so we can trigger a clean shutdown of the consumer.
func ExampleConsumer_for_loop() {
	master, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := master.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	consumer, err := master.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		log.Fatalln(err)
	}

	go func() {
		// By default, the consumer will always keep going, unless we tell it to stop.
		// In this case, we capture the SIGINT signal so we can tell the consumer to stop.
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		consumer.AsyncClose()
	}()

	msgCount := 0
	for message := range consumer.Messages() {
		log.Println(string(message.Value))
		msgCount++
	}
	log.Println("Processed", msgCount, "messages.")
}

// This example shows how to use a consumer with a select statement
// dealing with the different channels.
func ExampleConsumer_select() {
	config := NewConfig()
	config.Consumer.Return.Errors = true // Handle errors manually instead of letting Sarama log them.

	master, err := NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := master.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	consumer, err := master.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	msgCount := 0

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

consumerLoop:
	for {
		select {
		case err := <-consumer.Errors():
			log.Println(err)
		case <-consumer.Messages():
			msgCount++
		case <-signals:
			log.Println("Received interrupt")
			break consumerLoop
		}
	}
	log.Println("Processed", msgCount, "messages.")
}

// This example shows how to use a consumer with different goroutines
// to read from the Messages and Errors channels.
func ExampleConsumer_goroutines() {
	config := NewConfig()
	config.Consumer.Return.Errors = true // Handle errors manually instead of letting Sarama log them.

	master, err := NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := master.Close(); err != nil {
			panic(err)
		}
	}()

	consumer, err := master.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}

	var (
		wg       sync.WaitGroup
		msgCount int
	)

	wg.Add(1)
	go func() {
		defer wg.Done()
		for message := range consumer.Messages() {
			log.Printf("Consumed message with offset %d", message.Offset)
			msgCount++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range consumer.Errors() {
			log.Println(err)
		}
	}()

	// Wait for an interrupt signal to trigger the shutdown
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	<-signals
	consumer.AsyncClose()

	// Wait for the Messages and Errors channels to be fully drained.
	wg.Wait()
	log.Println("Processed", msgCount, "messages.")
}