consumer_test.go
package sarama

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"testing"
	"time"
)

var testMsg = StringEncoder("Foo")
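
// Most tests below follow the same pattern: one or more MockBrokers are
// programmed with canned responses keyed by request type, for example
//
//	broker.SetHandlerByMap(map[string]MockResponse{
//		"MetadataRequest": NewMockMetadataResponse(t),
//		"OffsetRequest":   NewMockOffsetResponse(t),
//		"FetchRequest":    NewMockFetchResponse(t, 1),
//	})
//
// and the consumer under test is then pointed at the mock broker's address.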
// If a particular offset is provided then messages are consumed starting from
// that offset.
func TestConsumerOffsetManual(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 2345),
		"FetchRequest": mockFetchResponse,
	})

	// When
	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 1234)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages starting from offset 1234 are consumed.
	for i := 0; i < 10; i++ {
		select {
		case message := <-consumer.Messages():
			assertMessageOffset(t, message, int64(i+1234))
		case err := <-consumer.Errors():
			t.Error(err)
		}
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}
// If `OffsetNewest` is passed as the initial offset then the first consumed
// message indeed corresponds to the offset that the broker claims to be the
// newest in its metadata response.
func TestConsumerOffsetNewest(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 10).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 9, testMsg).
			SetMessage("my_topic", 0, 10, testMsg).
			SetMessage("my_topic", 0, 11, testMsg).
			SetHighWaterMark("my_topic", 0, 14),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-consumer.Messages(), 10)
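	// HighWaterMarkOffset reflects the high water mark carried by the most
	// recent fetch response, which the mock above set to 14.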
	if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
		t.Errorf("Expected high water mark offset 14, found %d", hwmo)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}
// It is possible to close a partition consumer and then create a new one for
// the same topic/partition.
func TestConsumerRecreate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 10, testMsg),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 10)

	// When
	safeClose(t, pc)
	pc, err = c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-pc.Messages(), 10)

	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}
// An attempt to consume the same partition twice should fail.
func TestConsumerDuplicate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc1, err := c.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc2, err := c.ConsumePartition("my_topic", 0, 0)

	// Then
	if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
		t.Fatal("A partition cannot be consumed twice at the same time")
	}

	safeClose(t, pc1)
	safeClose(t, c)
	broker0.Close()
}
// If the consumer fails to refresh metadata it keeps retrying at the
// frequency specified by `Config.Consumer.Retry.Backoff`.
func TestConsumerLeaderRefreshError(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)

	// Stage 1: my_topic/0 served by broker0
	Logger.Printf(" STAGE 1")

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 200 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0
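	// Metadata.Retry.Max = 0 makes a failed metadata refresh give up
	// immediately instead of retrying internally; stage 2 below relies on
	// this to fail fast.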
	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
	// but the requests to retrieve metadata fail with network timeout.
	Logger.Printf(" STAGE 2")

	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})
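	// NewMockWrapper replays the prepared response verbatim for every fetch.
	// With no MetadataRequest handler registered anymore, the metadata refresh
	// triggered by ErrNotLeaderForPartition times out (Net.ReadTimeout) and is
	// surfaced as ErrOutOfBrokers below.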
	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Stage 3: finally the metadata returned by broker0 says that broker1 is
	// the new leader for my_topic/0. Consumption resumes.
	Logger.Printf(" STAGE 3")

	broker1 := NewMockBroker(t, 101)
	broker1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 124, testMsg),
	})
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetBroker(broker1.Addr(), broker1.BrokerID()).
			SetLeader("my_topic", 0, broker1.BrokerID()),
	})

	assertMessageOffset(t, <-pc.Messages(), 124)

	safeClose(t, pc)
	safeClose(t, c)
	broker1.Close()
	broker0.Close()
}
func TestConsumerInvalidTopic(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)

	// Then
	if pc != nil || err != ErrUnknownTopicOrPartition {
		t.Errorf("Should fail with ErrUnknownTopicOrPartition, err=%v", err)
	}

	safeClose(t, c)
	broker0.Close()
}
// Nothing bad happens if a partition consumer that has no leader assigned at
// the moment is closed.
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// broker0 says that it is no longer the leader for my_topic/0, but the
	// requests to retrieve metadata fail with network timeout.
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})

	// When
	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Then: the partition consumer can be closed without any problem.
	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}
// If the initial offset passed on partition consumer creation is out of the
// actual offset range for the partition, then the partition consumer stops
// immediately, closing its output channels.
func TestConsumerShutsDownOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockWrapper(fetchResponse),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 101)
	if err != nil {
		t.Fatal(err)
	}

	// Then: consumer should shut down closing its messages and errors channels.
	if _, ok := <-consumer.Messages(); ok {
		t.Error("Expected the consumer to shut down")
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}
// If a fetch response contains messages with offsets that are smaller than
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse1 := &FetchResponse{}
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3)
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4)
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNoError)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 0),
		"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
	})
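	// NewMockSequence serves its constituent responses in order: the first
	// fetch returns messages 1-4, and subsequent fetches get the trailing
	// empty response.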
	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages with offsets 1 and 2 are not returned even though they
	// are present in the response.
	assertMessageOffset(t, <-consumer.Messages(), 3)
	assertMessageOffset(t, <-consumer.Messages(), 4)

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}
// It is fine if the offsets of fetched messages are not sequential (as long
// as they are strictly increasing!).
func TestConsumerNonSequentialOffsets(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse1 := &FetchResponse{}
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5)
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7)
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11)
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNoError)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 0),
		"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 3)
	if err != nil {
		t.Fatal(err)
	}
	// Then: the messages at offsets 5, 7, and 11 are all returned, even
	// though they do not immediately follow the requested initial offset 3.
	assertMessageOffset(t, <-consumer.Messages(), 5)
	assertMessageOffset(t, <-consumer.Messages(), 7)
	assertMessageOffset(t, <-consumer.Messages(), 11)

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}
// If the leadership of a partition changes then the consumer resolves the new
// leader and switches to it.
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := NewMockBroker(t, 10)
	leader0 := NewMockBroker(t, 0)
	leader1 := NewMockBroker(t, 1)

	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(leader0.Addr(), leader0.BrokerID()).
			SetBroker(leader1.Addr(), leader1.BrokerID()).
			SetLeader("my_topic", 0, leader0.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	mockOffsetResponse1 := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 0).
		SetOffset("my_topic", 0, OffsetNewest, 1000).
		SetOffset("my_topic", 1, OffsetOldest, 0).
		SetOffset("my_topic", 1, OffsetNewest, 1000)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})
	leader1.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})

	// launch test goroutines
	config := NewConfig()
	config.Consumer.Retry.Backoff = 50 * time.Millisecond
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// we expect to end up (eventually) consuming exactly ten messages on each partition
	var wg sync.WaitGroup
	for i := int32(0); i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", i, 0)
		if err != nil {
			t.Error(err)
		}

		go func(c PartitionConsumer) {
			for err := range c.Errors() {
				t.Error(err)
			}
		}(consumer)

		wg.Add(1)
		go func(partition int32, c PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(i, consumer)
	}
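	// The stages below reprogram the mock brokers while the consumer
	// goroutines above are running; the 50ms sleeps give the consumers time
	// to observe each stage before the brokers are reprogrammed again.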
	time.Sleep(50 * time.Millisecond)
	Logger.Printf(" STAGE 1")
	// Stage 1:
	//   * my_topic/0 -> leader0 serves 4 messages
	//   * my_topic/1 -> leader1 serves 0 messages
	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 4; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf(" STAGE 2")
	// Stage 2:
	//   * leader0 says that it is no longer serving my_topic/0
	//   * seedBroker says that leader1 is serving my_topic/0 now

	// seed broker announces that the new leader for partition 0 is leader1
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	// leader0 says it is no longer the leader of partition 0
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse),
	})
	time.Sleep(50 * time.Millisecond)
	Logger.Printf(" STAGE 3")
	// Stage 3:
	//   * my_topic/0 -> leader1 serves 3 messages
	//   * my_topic/1 -> leader1 serves 8 messages

	// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
	mockFetchResponse2 := NewMockFetchResponse(t, 2)
	for i := 4; i < 7; i++ {
		mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	for i := 0; i < 8; i++ {
		mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse2,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf(" STAGE 4")
	// Stage 4:
	//   * my_topic/0 -> leader1 serves 3 messages
	//   * my_topic/1 -> leader1 says that it is no longer the leader
	//   * seedBroker says that leader0 is the new leader for my_topic/1

	// metadata assigns partition 0 to leader1 and partition 1 to leader0
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader0.BrokerID()),
	})

	// leader1 provides three more messages on partition 0, then says it is no
	// longer the leader of partition 1
	mockFetchResponse3 := NewMockFetchResponse(t, 3).
		SetMessage("my_topic", 0, int64(7), testMsg).
		SetMessage("my_topic", 0, int64(8), testMsg).
		SetMessage("my_topic", 0, int64(9), testMsg)
	fetchResponse4 := new(FetchResponse)
	fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
	})

	// leader0 provides two messages on partition 1
	mockFetchResponse4 := NewMockFetchResponse(t, 2)
	for i := 8; i < 10; i++ {
		mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse4,
	})

	wg.Wait()
	safeClose(t, master)
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
}
// When two partitions have the same broker as the leader, a full channel
// buffer on one partition consumer does not affect the other consumer's
// ability to read messages.
func TestConsumerInterleavedClose(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()).
			SetLeader("my_topic", 1, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 1000).
			SetOffset("my_topic", 0, OffsetNewest, 1100).
			SetOffset("my_topic", 1, OffsetOldest, 2000).
			SetOffset("my_topic", 1, OffsetNewest, 2100),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 1000, testMsg).
			SetMessage("my_topic", 0, 1001, testMsg).
			SetMessage("my_topic", 0, 1002, testMsg).
			SetMessage("my_topic", 1, 2000, testMsg),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
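	// An unbuffered Messages() channel is the harshest setting for this test:
	// a message for partition 1 can only be delivered while somebody reads
	// it, so partition 0 must make progress without that ever happening.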
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then: we can read from partition 0 even if nobody reads from partition 1
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c0.Messages(), 1001)
	assertMessageOffset(t, <-c0.Messages(), 1002)

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
}
func TestConsumerBounceWithReferenceOpen(t *testing.T) {
	broker0 := NewMockBroker(t, 0)
	broker0Addr := broker0.Addr()
	broker1 := NewMockBroker(t, 1)

	mockMetadataResponse := NewMockMetadataResponse(t).
		SetBroker(broker0.Addr(), broker0.BrokerID()).
		SetBroker(broker1.Addr(), broker1.BrokerID()).
		SetLeader("my_topic", 0, broker0.BrokerID()).
		SetLeader("my_topic", 1, broker1.BrokerID())

	mockOffsetResponse := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 1000).
		SetOffset("my_topic", 0, OffsetNewest, 1100).
		SetOffset("my_topic", 1, OffsetOldest, 2000).
		SetOffset("my_topic", 1, OffsetNewest, 2100)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
		mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse,
		"FetchRequest":  mockFetchResponse,
	})
	broker1.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": mockMetadataResponse,
		"OffsetRequest":   mockOffsetResponse,
		"FetchRequest":    mockFetchResponse,
	})

	config := NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.ChannelBufferSize = 1
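	// ChannelBufferSize = 1 presumably lets each partition consumer hold one
	// in-flight message while broker0 is bounced below, so the two channels
	// do not have to be drained in lock step.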
	master, err := NewConsumer([]string{broker1.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}
	// Read messages from both partitions to make sure that both brokers
	// operate normally.
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c1.Messages(), 2000)

	// Simulate broker shutdown. Note that the metadata response does not
	// change, i.e. the leadership does not move to another broker, so the
	// partition consumer will keep retrying to restore the connection with
	// the broker.
	broker0.Close()

	// Make sure that while the partition/0 leader is down, consumer/partition/1
	// is capable of pulling messages from broker1.
	for i := 1; i < 7; i++ {
		offset := (<-c1.Messages()).Offset
		if offset != int64(2000+i) {
			t.Errorf("Expected offset %d from consumer/partition/1, got %d", int64(2000+i), offset)
		}
	}

	// Bring broker0 back to service.
	broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	// Read the rest of the messages from both partitions.
	for i := 7; i < 10; i++ {
		assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
	}
	for i := 1; i < 10; i++ {
		assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
	}

	select {
	case <-c0.Errors():
	default:
		t.Errorf("Partition consumer should have detected broker restart")
	}

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
	broker1.Close()
}
func TestConsumerOffsetOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 2)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 2345),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then
	if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}

	safeClose(t, master)
	broker0.Close()
}
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
	if msg.Offset != expectedOffset {
		t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
	}
}
// This example shows how to use the consumer to read messages
// from a single partition.
func ExampleConsumer() {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	consumed := 0
ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
			consumed++
		case <-signals:
			break ConsumerLoop
		}
	}

	log.Printf("Consumed: %d\n", consumed)
}
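
// The following sketch is an addition for illustration (not part of the
// upstream suite): it shows the error-channel pattern that several tests
// above exercise. With Consumer.Return.Errors enabled, consumption errors are
// delivered on the partition consumer's Errors() channel instead of being
// logged, and the caller must drain that channel.
func ExamplePartitionConsumer_Errors() {
	config := NewConfig()
	config.Consumer.Return.Errors = true

	consumer, err := NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	pc, err := consumer.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := pc.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Drain errors in a separate goroutine so an unread error can never block
	// the Messages() loop below.
	go func() {
		for consErr := range pc.Errors() {
			log.Printf("consumption error: %v", consErr.Err)
		}
	}()

	for msg := range pc.Messages() {
		log.Printf("Consumed message offset %d", msg.Offset)
	}
}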