consumer_test.go

package sarama

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"testing"
	"time"
)
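
// testMsg is the message value served by all mock fetch responses in these tests.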
var testMsg = StringEncoder("Foo")

// If a particular offset is provided then messages are consumed starting from
// that offset.
func TestConsumerOffsetManual(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)

	mockFetchResponse := newMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 2345),
		"FetchRequest": mockFetchResponse,
	})

	// When
	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 1234)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages starting from offset 1234 are consumed.
	for i := 0; i < 10; i++ {
		select {
		case message := <-consumer.Messages():
			assertMessageOffset(t, message, int64(i+1234))
		case err := <-consumer.Errors():
			t.Error(err)
		}
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If `OffsetNewest` is passed as the initial offset then the first consumed
// message indeed corresponds to the offset that the broker claims to be the
// newest in its offset response.
func TestConsumerOffsetNewest(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 10).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": newMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 9, testMsg).
			SetMessage("my_topic", 0, 10, testMsg).
			SetMessage("my_topic", 0, 11, testMsg).
			SetHighWaterMark("my_topic", 0, 14),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-consumer.Messages(), 10)
	if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
		t.Errorf("Expected high water mark offset 14, found %d", hwmo)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// It is possible to close a partition consumer and then re-create it.
func TestConsumerRecreate(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": newMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 10, testMsg),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 10)

	// When
	safeClose(t, pc)
	pc, err = c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-pc.Messages(), 10)

	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// An attempt to consume the same partition twice should fail.
func TestConsumerDuplicate(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": newMockFetchResponse(t, 1),
	})

	config := NewConfig()
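	// A zero ChannelBufferSize gives the consumer unbuffered channels, so no
	// messages pile up unread while the duplicate subscription is attempted.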
	config.ChannelBufferSize = 0
	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc1, err := c.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc2, err := c.ConsumePartition("my_topic", 0, 0)

	// Then
	if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
		t.Fatal("A partition cannot be consumed twice at the same time")
	}

	safeClose(t, pc1)
	safeClose(t, c)
	broker0.Close()
}

// If the consumer fails to refresh metadata, it keeps retrying with the
// frequency specified by `Config.Consumer.Retry.Backoff`.
func TestConsumerLeaderRefreshError(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 100)

	// Stage 1: my_topic/0 served by broker0
	Logger.Printf("    STAGE 1")

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": newMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
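	// Short timeouts and no metadata retries keep the test fast, while
	// Return.Errors exposes the failure on the Errors() channel.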
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 200 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
	// but the requests to retrieve metadata fail with a network timeout.
	Logger.Printf("    STAGE 2")

	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)

	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": newMockWrapper(fetchResponse2),
	})

	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Stage 3: finally the metadata returned by broker0 says that broker1 is
	// the new leader for my_topic/0. Consumption resumes.
	Logger.Printf("    STAGE 3")

	broker1 := newMockBroker(t, 101)
	broker1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": newMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 124, testMsg),
	})
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetBroker(broker1.Addr(), broker1.BrokerID()).
			SetLeader("my_topic", 0, broker1.BrokerID()),
	})

	assertMessageOffset(t, <-pc.Messages(), 124)

	safeClose(t, pc)
	safeClose(t, c)
	broker1.Close()
	broker0.Close()
}

func TestConsumerInvalidTopic(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)

	// Then
	if pc != nil || err != ErrUnknownTopicOrPartition {
		t.Errorf("Should fail with ErrUnknownTopicOrPartition, got err=%v", err)
	}

	safeClose(t, c)
	broker0.Close()
}

// Nothing bad happens if a partition consumer that currently has no leader
// assigned is closed.
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": newMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// broker0 says that it is no longer the leader for my_topic/0, but the
	// requests to retrieve metadata fail with a network timeout.
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)

	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": newMockWrapper(fetchResponse2),
	})

	// When
	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Then: the partition consumer can be closed without any problem.
	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// If the initial offset passed on partition consumer creation is out of the
// actual offset range for the partition, then the partition consumer stops
// immediately, closing its output channels.
func TestConsumerShutsDownOutOfRange(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)
	broker0.SetHandler(func(req *request) (res encoder) {
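		// Serve metadata and offset requests normally, but answer every
		// fetch with ErrOffsetOutOfRange.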
		switch reqBody := req.body.(type) {
		case *MetadataRequest:
			return newMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()).
				For(reqBody)
		case *OffsetRequest:
			return newMockOffsetResponse(t).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 7).
				For(reqBody)
		case *FetchRequest:
			fetchResponse := new(FetchResponse)
			fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
			return fetchResponse
		}
		return nil
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 101)
	if err != nil {
		t.Fatal(err)
	}

	// Then: consumer should shut down, closing its messages and errors channels.
	if _, ok := <-consumer.Messages(); ok {
		t.Error("Expected the consumer to shut down")
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If a fetch response contains messages with offsets that are smaller than
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)
	called := 0
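
	// Only the first fetch returns messages (offsets 1 through 4); every
	// subsequent fetch gets an empty response.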
	broker0.SetHandler(func(req *request) (res encoder) {
		switch req.body.(type) {
		case *MetadataRequest:
			return newMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body)
		case *OffsetRequest:
			return newMockOffsetResponse(t).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body)
		case *FetchRequest:
			fetchResponse := &FetchResponse{}
			called++
			if called > 1 {
				fetchResponse.AddError("my_topic", 0, ErrNoError)
				return fetchResponse
			}
			fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
			fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
			fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
			fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
			return fetchResponse
		}
		return nil
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages with offsets 1 and 2 are not returned even though they
	// are present in the response.
	assertMessageOffset(t, <-consumer.Messages(), 3)
	assertMessageOffset(t, <-consumer.Messages(), 4)

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// It is fine if the offsets of fetched messages are not sequential, as long
// as they are strictly increasing.
func TestConsumerNonSequentialOffsets(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)
	called := 0
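
	// The first fetch returns the non-sequential offsets 5, 7 and 11; later
	// fetches return an empty response.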
	broker0.SetHandler(func(req *request) (res encoder) {
		switch req.body.(type) {
		case *MetadataRequest:
			return newMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body)
		case *OffsetRequest:
			return newMockOffsetResponse(t).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body)
		case *FetchRequest:
			called++
			fetchResponse := &FetchResponse{}
			if called > 1 {
				fetchResponse.AddError("my_topic", 0, ErrNoError)
				return fetchResponse
			}
			fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
			fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
			fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
			return fetchResponse
		}
		return nil
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Then: all three messages are returned despite the gaps between their
	// offsets.
	assertMessageOffset(t, <-consumer.Messages(), 5)
	assertMessageOffset(t, <-consumer.Messages(), 7)
	assertMessageOffset(t, <-consumer.Messages(), 11)

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If the leadership for a partition changes then the consumer resolves the
// new leader and switches to it.
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := newMockBroker(t, 10)
	leader0 := newMockBroker(t, 0)
	leader1 := newMockBroker(t, 1)

	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(leader0.Addr(), leader0.BrokerID()).
			SetBroker(leader1.Addr(), leader1.BrokerID()).
			SetLeader("my_topic", 0, leader0.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	mockOffsetResponse1 := newMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 0).
		SetOffset("my_topic", 0, OffsetNewest, 1000).
		SetOffset("my_topic", 1, OffsetOldest, 0).
		SetOffset("my_topic", 1, OffsetNewest, 1000)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest": newMockFetchResponse(t, 1),
	})
	leader1.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest": newMockFetchResponse(t, 1),
	})

	// launch test goroutines
	config := NewConfig()
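	// Back off only briefly between retries so the leadership changes staged
	// below are picked up quickly.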
	config.Consumer.Retry.Backoff = 50 * time.Millisecond
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// we expect to end up (eventually) consuming exactly ten messages on each partition
	var wg sync.WaitGroup
	for i := int32(0); i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", i, 0)
		if err != nil {
			t.Error(err)
		}

		go func(c PartitionConsumer) {
			for err := range c.Errors() {
				t.Error(err)
			}
		}(consumer)

		wg.Add(1)
		go func(partition int32, c PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(i, consumer)
	}
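
	// Each stage below reconfigures the mock brokers and then sleeps briefly
	// to let the consumers act on the change before the next stage begins.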
	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 1")
	// Stage 1:
	//   * my_topic/0 -> leader0 serves 4 messages
	//   * my_topic/1 -> leader1 serves 0 messages

	mockFetchResponse := newMockFetchResponse(t, 1)
	for i := 0; i < 4; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 2")
	// Stage 2:
	//   * leader0 says that it is no longer serving my_topic/0
	//   * seedBroker says that leader1 is serving my_topic/0 now

	// the seed broker now reports leader1 as the leader of partition 0
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	// leader0 says it is no longer the leader of partition 0
	leader0.SetHandler(func(req *request) (res encoder) {
		switch req.body.(type) {
		case *FetchRequest:
			fetchResponse := new(FetchResponse)
			fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
			return fetchResponse
		}
		return nil
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 3")
	// Stage 3:
	//   * my_topic/0 -> leader1 serves 3 messages
	//   * my_topic/1 -> leader1 serves 8 messages

	// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
	mockFetchResponse2 := newMockFetchResponse(t, 2)
	for i := 4; i < 7; i++ {
		mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	for i := 0; i < 8; i++ {
		mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse2,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 4")
	// Stage 4:
	//   * my_topic/0 -> leader1 serves 3 messages
	//   * my_topic/1 -> leader1 says that it is no longer the leader
	//   * seedBroker says that leader0 is the new leader for my_topic/1

	// metadata assigns partition 0 to leader1 and partition 1 to leader0
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader0.BrokerID()),
	})

	// leader1 provides three more messages on partition 0, and says it is no
	// longer the leader of partition 1
	mockFetchResponse3 := newMockFetchResponse(t, 3).
		SetMessage("my_topic", 0, int64(7), testMsg).
		SetMessage("my_topic", 0, int64(8), testMsg).
		SetMessage("my_topic", 0, int64(9), testMsg)
	leader1.SetHandler(func(req *request) (res encoder) {
		switch reqBody := req.body.(type) {
		case *FetchRequest:
			res := mockFetchResponse3.For(reqBody).(*FetchResponse)
			res.AddError("my_topic", 1, ErrNotLeaderForPartition)
			return res
		}
		return nil
	})

	// leader0 provides two messages on partition 1
	mockFetchResponse4 := newMockFetchResponse(t, 2)
	for i := 8; i < 10; i++ {
		mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse4,
	})

	wg.Wait()
	safeClose(t, master)
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
}

// When two partitions have the same broker as the leader, a full channel
// buffer for one partition consumer does not affect the ability of the other
// consumer to read messages.
func TestConsumerInterleavedClose(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()).
			SetLeader("my_topic", 1, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 1000).
			SetOffset("my_topic", 0, OffsetNewest, 1100).
			SetOffset("my_topic", 1, OffsetOldest, 2000).
			SetOffset("my_topic", 1, OffsetNewest, 2100),
		"FetchRequest": newMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 1000, testMsg).
			SetMessage("my_topic", 0, 1001, testMsg).
			SetMessage("my_topic", 0, 1002, testMsg).
			SetMessage("my_topic", 1, 2000, testMsg),
	})

	config := NewConfig()
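	// With an unbuffered channel the unread partition 1 consumer blocks its
	// deliveries immediately; partition 0 must remain readable regardless.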
	config.ChannelBufferSize = 0
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then: we can read from partition 0 even if nobody reads from partition 1
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c0.Messages(), 1001)
	assertMessageOffset(t, <-c0.Messages(), 1002)

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
}

func TestConsumerBounceWithReferenceOpen(t *testing.T) {
	broker0 := newMockBroker(t, 0)
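	// Remember the address so that a replacement broker can be started on the
	// same endpoint after broker0 is shut down.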
	broker0Addr := broker0.Addr()
	broker1 := newMockBroker(t, 1)

	mockMetadataResponse := newMockMetadataResponse(t).
		SetBroker(broker0.Addr(), broker0.BrokerID()).
		SetBroker(broker1.Addr(), broker1.BrokerID()).
		SetLeader("my_topic", 0, broker0.BrokerID()).
		SetLeader("my_topic", 1, broker1.BrokerID())

	mockOffsetResponse := newMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 1000).
		SetOffset("my_topic", 0, OffsetNewest, 1100).
		SetOffset("my_topic", 1, OffsetOldest, 2000).
		SetOffset("my_topic", 1, OffsetNewest, 2100)

	mockFetchResponse := newMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
		mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse,
		"FetchRequest": mockFetchResponse,
	})
	broker1.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": mockMetadataResponse,
		"OffsetRequest": mockOffsetResponse,
		"FetchRequest": mockFetchResponse,
	})

	config := NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.ChannelBufferSize = 1

	master, err := NewConsumer([]string{broker1.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// Read messages from both partitions to make sure that both brokers
	// operate normally.
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c1.Messages(), 2000)

	// Simulate a broker shutdown. Note that the metadata response does not
	// change: the leadership does not move to another broker, so the
	// partition consumer keeps retrying to restore the connection with the
	// broker.
	broker0.Close()

	// Make sure that while the partition/0 leader is down, consumer/partition/1
	// is capable of pulling messages from broker1.
	for i := 1; i < 7; i++ {
		offset := (<-c1.Messages()).Offset
		if offset != int64(2000+i) {
			t.Errorf("Expected offset %d from consumer/partition/1, got %d", int64(2000+i), offset)
		}
	}

	// Bring broker0 back to service.
	broker0 = newMockBrokerAddr(t, 0, broker0Addr)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	// Read the rest of the messages from both partitions.
	for i := 7; i < 10; i++ {
		assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
	}
	for i := 1; i < 10; i++ {
		assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
	}

	select {
	case <-c0.Errors():
	default:
		t.Errorf("Partition consumer should have detected broker restart")
	}

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
	broker1.Close()
}

func TestConsumerOffsetOutOfRange(t *testing.T) {
	// Given
	broker0 := newMockBroker(t, 2)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 2345),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then
	if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}

	safeClose(t, master)
	broker0.Close()
}

func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
	if msg.Offset != expectedOffset {
		t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
	}
}

// This example shows how to use the consumer to read messages
// from a single partition.
func ExampleConsumer() {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	consumed := 0
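
// The ConsumerLoop label lets `break` exit the for-select loop in a single
// statement; a bare break would only leave the select.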
ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
			consumed++
		case <-signals:
			break ConsumerLoop
		}
	}

	log.Printf("Consumed: %d\n", consumed)
}