consumer_test.go

package sarama

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"testing"
	"time"
)
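
// safeClose is a test helper defined elsewhere in this package. A minimal
// sketch of what such a helper presumably looks like (an assumption, not the
// actual implementation) is:
//
//	func safeClose(t testing.TB, c io.Closer) {
//		if err := c.Close(); err != nil {
//			t.Error(err)
//		}
//	}

// TestConsumerOffsetManual checks that a partition consumer started at an
// explicit offset (1234) receives messages whose offsets increase sequentially
// from that starting point.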
func TestConsumerOffsetManual(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	for i := 0; i <= 10; i++ {
		fetchResponse := new(FetchResponse)
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+1234))
		leader.Returns(fetchResponse)
	}

	master, err := NewConsumer([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 1234)
	if err != nil {
		t.Fatal(err)
	}
	seedBroker.Close()

	for i := 0; i < 10; i++ {
		select {
		case message := <-consumer.Messages():
			if message.Offset != int64(i+1234) {
				t.Error("Incorrect message offset!")
			}
		case err := <-consumer.Errors():
			t.Error(err)
		}
	}

	safeClose(t, consumer)
	leader.Close()
}
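
// TestConsumerLatestOffset checks that a partition consumer started at
// OffsetNewest first asks the leader for the latest offset and, after the one
// delivered message, tracks an internal offset one past the value returned in
// the OffsetResponse.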
func TestConsumerLatestOffset(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	offsetResponse := new(OffsetResponse)
	offsetResponse.AddTopicPartition("my_topic", 0, 0x010101)
	leader.Returns(offsetResponse)

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), 0x010101)
	leader.Returns(fetchResponse)

	master, err := NewConsumer([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	seedBroker.Close()

	consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}
	leader.Close()
	safeClose(t, consumer)

	// we deliver one message, so it should be one higher than we return in the OffsetResponse
	if consumer.(*partitionConsumer).offset != 0x010102 {
		t.Error("Latest offset not fetched correctly:", consumer.(*partitionConsumer).offset)
	}
}

func TestConsumerFunnyOffsets(t *testing.T) {
	// for topics that are compressed and/or compacted (different things!) we have to be
	// able to handle receiving offsets that are non-sequential (though still strictly increasing) and
	// possibly starting prior to the actual value we requested
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(1))
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(3))
	leader.Returns(fetchResponse)

	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(5))
	leader.Returns(fetchResponse)

	master, err := NewConsumer([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 2)
	if err != nil {
		t.Fatal(err)
	}

	message := <-consumer.Messages()
	if message.Offset != 3 {
		t.Error("Incorrect message offset!")
	}

	leader.Close()
	seedBroker.Close()
	safeClose(t, consumer)
}
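
// TestConsumerRebalancingMultiplePartitions drives two partition consumers
// through a series of leadership changes between two mock brokers, checking
// that each partition still delivers exactly ten messages with sequential
// offsets despite the rebalancing.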
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := newMockBroker(t, 1)
	leader0 := newMockBroker(t, 2)
	leader1 := newMockBroker(t, 3)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
	metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	// launch test goroutines
	config := NewConfig()
	config.Consumer.Retry.Backoff = 0
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// we expect to end up (eventually) consuming exactly ten messages on each partition
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", int32(i), 0)
		if err != nil {
			t.Error(err)
		}

		go func(c PartitionConsumer) {
			for err := range c.Errors() {
				t.Error(err)
			}
		}(consumer)

		wg.Add(1)
		go func(partition int32, c PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(int32(i), consumer)
	}

	// leader0 provides first four messages on partition 0
	fetchResponse := new(FetchResponse)
	for i := 0; i < 4; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
	}
	leader0.Returns(fetchResponse)

	// leader0 says no longer leader of partition 0
	fetchResponse = new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
	leader0.Returns(fetchResponse)

	// metadata assigns both partitions to leader1
	metadataResponse = new(MetadataResponse)
	metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)
	time.Sleep(5 * time.Millisecond) // dumbest way to force a particular response ordering

	// leader1 provides five messages on partition 1
	fetchResponse = new(FetchResponse)
	for i := 0; i < 5; i++ {
		fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
	}
	leader1.Returns(fetchResponse)

	// leader1 provides three more messages on both partitions
	fetchResponse = new(FetchResponse)
	for i := 0; i < 3; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+4))
		fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+5))
	}
	leader1.Returns(fetchResponse)

	// leader1 provides three more messages on partition 0, says no longer leader of partition 1
	fetchResponse = new(FetchResponse)
	for i := 0; i < 3; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+7))
	}
	fetchResponse.AddError("my_topic", 1, ErrNotLeaderForPartition)
	leader1.Returns(fetchResponse)

	// metadata assigns partition 0 to leader1 and partition 1 to leader0
	metadataResponse = new(MetadataResponse)
	metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader0.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)
	time.Sleep(5 * time.Millisecond) // dumbest way to force a particular response ordering

	// leader0 provides two messages on partition 1
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(8))
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(9))
	leader0.Returns(fetchResponse)

	// leader0 provides last message on partition 1
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
	leader0.Returns(fetchResponse)

	// leader1 provides last message on partition 0
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
	leader1.Returns(fetchResponse)

	wg.Wait()
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
}
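
// TestConsumerInterleavedClose opens two partition consumers against the same
// leader with ChannelBufferSize set to zero and closes them in the reverse
// order of creation; it is skipped until bug #325 is fixed.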
func TestConsumerInterleavedClose(t *testing.T) {
	t.Skip("Enable once bug #325 is fixed.")

	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	config := NewConfig()
	config.ChannelBufferSize = 0
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(0))
	leader.Returns(fetchResponse)

	c1, err := master.ConsumePartition("my_topic", 1, 0)
	if err != nil {
		t.Fatal(err)
	}

	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(0))
	leader.Returns(fetchResponse)

	safeClose(t, c1)
	safeClose(t, c0)
	leader.Close()
	seedBroker.Close()
}

// This example has the simplest use case of the consumer. It simply
// iterates over the messages channel using a for/range loop. Because
// a producer never stops unless requested, a signal handler is registered
// so we can trigger a clean shutdown of the consumer.
func ExampleConsumer_for_loop() {
	master, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := master.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	consumer, err := master.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		log.Fatalln(err)
	}

	go func() {
		// By default, the consumer will always keep going, unless we tell it to stop.
		// In this case, we capture the SIGINT signal so we can tell the consumer to stop.
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		consumer.AsyncClose()
	}()

	msgCount := 0
	for message := range consumer.Messages() {
		log.Println(string(message.Value))
		msgCount++
	}
	log.Println("Processed", msgCount, "messages.")
}
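
// Note that the range loop above relies on AsyncClose: once shutdown
// completes the Messages channel is closed, which is what lets the
// for/range statement terminate.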

// This example shows how to use a consumer with a select statement
// dealing with the different channels.
func ExampleConsumer_select() {
	config := NewConfig()
	config.Consumer.ReturnErrors = true // Handle errors manually instead of letting Sarama log them.

	master, err := NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := master.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	consumer, err := master.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	msgCount := 0

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

consumerLoop:
	for {
		select {
		case err := <-consumer.Errors():
			log.Println(err)
		case <-consumer.Messages():
			msgCount++
		case <-signals:
			log.Println("Received interrupt")
			break consumerLoop
		}
	}
	log.Println("Processed", msgCount, "messages.")
}
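
// The consumerLoop label above is needed because a plain break inside the
// select would only leave the select statement, not the enclosing for loop.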

// This example shows how to use a consumer with different goroutines
// to read from the Messages and Errors channels.
func ExampleConsumer_goroutines() {
	config := NewConfig()
	config.Consumer.ReturnErrors = true // Handle errors manually instead of letting Sarama log them.

	master, err := NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := master.Close(); err != nil {
			panic(err)
		}
	}()

	consumer, err := master.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}

	var (
		wg       sync.WaitGroup
		msgCount int
	)

	wg.Add(1)
	go func() {
		defer wg.Done()
		for message := range consumer.Messages() {
			log.Printf("Consumed message with offset %d", message.Offset)
			msgCount++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range consumer.Errors() {
			log.Println(err)
		}
	}()

	// Wait for an interrupt signal to trigger the shutdown
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	<-signals
	consumer.AsyncClose()

	// Wait for the Messages and Errors channel to be fully drained.
	wg.Wait()
	log.Println("Processed", msgCount, "messages.")
}
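
// AsyncClose returns without waiting for shutdown to finish, which is why the
// example above waits on the WaitGroup: once the Messages and Errors channels
// are closed, both range loops exit and wg.Wait returns.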