consumer_test.go

package sarama

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"testing"
	"time"
)

var testMsg = StringEncoder("Foo")

// If a particular offset is provided then messages are consumed starting from
// that offset.
func TestConsumerOffsetManual(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 2345),
		"FetchRequest": mockFetchResponse,
	})

	// When
	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 1234)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages starting from offset 1234 are consumed.
	for i := 0; i < 10; i++ {
		select {
		case message := <-consumer.Messages():
			assertMessageOffset(t, message, int64(i+1234))
		case err := <-consumer.Errors():
			t.Error(err)
		}
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If `OffsetNewest` is passed as the initial offset then the first consumed
// message indeed corresponds to the offset that the broker claims to be the
// newest in its offset response.
func TestConsumerOffsetNewest(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 10).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 9, testMsg).
			SetMessage("my_topic", 0, 10, testMsg).
			SetMessage("my_topic", 0, 11, testMsg).
			SetHighWaterMark("my_topic", 0, 14),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-consumer.Messages(), 10)
	if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
		t.Errorf("Expected high water mark offset 14, found %d", hwmo)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// It is possible to close a partition consumer and then create the same one
// anew.
func TestConsumerRecreate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 10, testMsg),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 10)

	// When
	safeClose(t, pc)
	pc, err = c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-pc.Messages(), 10)

	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// An attempt to consume the same partition twice should fail.
func TestConsumerDuplicate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc1, err := c.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc2, err := c.ConsumePartition("my_topic", 0, 0)

	// Then
	if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
		t.Fatal("A partition cannot be consumed twice at the same time")
	}

	safeClose(t, pc1)
	safeClose(t, c)
	broker0.Close()
}

// If the consumer fails to refresh metadata, it keeps retrying at the
// frequency specified by `Config.Consumer.Retry.Backoff`.
func TestConsumerLeaderRefreshError(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)

	// Stage 1: my_topic/0 is served by broker0.
	Logger.Printf("    STAGE 1")

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 200 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
	// but requests to retrieve the metadata fail with a network timeout.
	Logger.Printf("    STAGE 2")

	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})

	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Stage 3: finally the metadata returned by broker0 says that broker1 is
	// the new leader for my_topic/0. Consumption resumes.
	Logger.Printf("    STAGE 3")

	broker1 := NewMockBroker(t, 101)
	broker1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 124, testMsg),
	})
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetBroker(broker1.Addr(), broker1.BrokerID()).
			SetLeader("my_topic", 0, broker1.BrokerID()),
	})

	assertMessageOffset(t, <-pc.Messages(), 124)

	safeClose(t, pc)
	safeClose(t, c)
	broker1.Close()
	broker0.Close()
}

func TestConsumerInvalidTopic(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)

	// Then
	if pc != nil || err != ErrUnknownTopicOrPartition {
		t.Errorf("Should fail with ErrUnknownTopicOrPartition, err=%v", err)
	}

	safeClose(t, c)
	broker0.Close()
}

// Nothing bad happens if a partition consumer that has no leader assigned at
// the moment is closed.
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// broker0 says that it is no longer the leader for my_topic/0, but
	// requests to retrieve the metadata fail with a network timeout.
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})

	// When
	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Then: the partition consumer can be closed without any problem.
	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// If the initial offset passed on partition consumer creation is out of the
// actual offset range for the partition, then the partition consumer stops
// immediately, closing its output channels.
func TestConsumerShutsDownOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockWrapper(fetchResponse),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 101)
	if err != nil {
		t.Fatal(err)
	}

	// Then: the consumer should shut down, closing its messages and errors channels.
	if _, ok := <-consumer.Messages(); ok {
		t.Error("Expected the consumer to shut down")
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If a fetch response contains messages with offsets that are smaller than
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
	// Given
	legacyFetchResponse := &FetchResponse{}
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
	newFetchResponse := &FetchResponse{Version: 4}
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4)
	newFetchResponse.SetLastOffsetDelta("my_topic", 0, 4)
	newFetchResponse.SetLastStableOffset("my_topic", 0, 4)
	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
		var offsetResponseVersion int16
		cfg := NewConfig()
		cfg.Consumer.Return.Errors = true
		if fetchResponse1.Version >= 4 {
			cfg.Version = V0_11_0_0
			offsetResponseVersion = 1
		}

		broker0 := NewMockBroker(t, 0)
		fetchResponse2 := &FetchResponse{}
		fetchResponse2.Version = fetchResponse1.Version
		fetchResponse2.AddError("my_topic", 0, ErrNoError)
		broker0.SetHandlerByMap(map[string]MockResponse{
			"MetadataRequest": NewMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()),
			"OffsetRequest": NewMockOffsetResponse(t).
				SetVersion(offsetResponseVersion).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0),
			"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
		})

		master, err := NewConsumer([]string{broker0.Addr()}, cfg)
		if err != nil {
			t.Fatal(err)
		}

		// When
		consumer, err := master.ConsumePartition("my_topic", 0, 3)
		if err != nil {
			t.Fatal(err)
		}

		// Then: messages with offsets 1 and 2 are not returned even though
		// they are present in the response.
		select {
		case msg := <-consumer.Messages():
			assertMessageOffset(t, msg, 3)
		case err := <-consumer.Errors():
			t.Fatal(err)
		}
		select {
		case msg := <-consumer.Messages():
			assertMessageOffset(t, msg, 4)
		case err := <-consumer.Errors():
			t.Fatal(err)
		}

		safeClose(t, consumer)
		safeClose(t, master)
		broker0.Close()
	}
}

func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) {
	// Given
	fetchResponse1 := &FetchResponse{Version: 4}
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)

	cfg := NewConfig()
	cfg.Version = V0_11_0_0

	broker0 := NewMockBroker(t, 0)
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.Version = 4
	fetchResponse2.AddError("my_topic", 0, ErrNoError)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetVersion(1).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 0),
		"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, cfg)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 1)
	if err != nil {
		t.Fatal(err)
	}

	assertMessageOffset(t, <-consumer.Messages(), 1)
	assertMessageOffset(t, <-consumer.Messages(), 2)

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// It is fine if the offsets of fetched messages are not sequential (they must
// still be strictly increasing though!).
func TestConsumerNonSequentialOffsets(t *testing.T) {
	// Given
	legacyFetchResponse := &FetchResponse{}
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
	newFetchResponse := &FetchResponse{Version: 4}
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11)
	newFetchResponse.SetLastOffsetDelta("my_topic", 0, 11)
	newFetchResponse.SetLastStableOffset("my_topic", 0, 11)
	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
		var offsetResponseVersion int16
		cfg := NewConfig()
		if fetchResponse1.Version >= 4 {
			cfg.Version = V0_11_0_0
			offsetResponseVersion = 1
		}

		broker0 := NewMockBroker(t, 0)
		fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version}
		fetchResponse2.AddError("my_topic", 0, ErrNoError)
		broker0.SetHandlerByMap(map[string]MockResponse{
			"MetadataRequest": NewMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()),
			"OffsetRequest": NewMockOffsetResponse(t).
				SetVersion(offsetResponseVersion).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0),
			"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
		})

		master, err := NewConsumer([]string{broker0.Addr()}, cfg)
		if err != nil {
			t.Fatal(err)
		}

		// When
		consumer, err := master.ConsumePartition("my_topic", 0, 3)
		if err != nil {
			t.Fatal(err)
		}

		// Then: the messages with non-sequential offsets 5, 7 and 11 are
		// delivered in order; the gaps between them are simply skipped.
		assertMessageOffset(t, <-consumer.Messages(), 5)
		assertMessageOffset(t, <-consumer.Messages(), 7)
		assertMessageOffset(t, <-consumer.Messages(), 11)

		safeClose(t, consumer)
		safeClose(t, master)
		broker0.Close()
	}
}

// If the leadership of a partition changes then the consumer resolves the new
// leader and switches to it.
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := NewMockBroker(t, 10)
	leader0 := NewMockBroker(t, 0)
	leader1 := NewMockBroker(t, 1)

	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(leader0.Addr(), leader0.BrokerID()).
			SetBroker(leader1.Addr(), leader1.BrokerID()).
			SetLeader("my_topic", 0, leader0.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	mockOffsetResponse1 := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 0).
		SetOffset("my_topic", 0, OffsetNewest, 1000).
		SetOffset("my_topic", 1, OffsetOldest, 0).
		SetOffset("my_topic", 1, OffsetNewest, 1000)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})
	leader1.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})

	// launch test goroutines
	config := NewConfig()
	config.Consumer.Retry.Backoff = 50 * time.Millisecond
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// we expect to end up (eventually) consuming exactly ten messages on each partition
	var wg sync.WaitGroup
	for i := int32(0); i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", i, 0)
		if err != nil {
			t.Error(err)
		}

		go func(c PartitionConsumer) {
			for err := range c.Errors() {
				t.Error(err)
			}
		}(consumer)

		wg.Add(1)
		go func(partition int32, c PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(i, consumer)
	}

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 1")
	// Stage 1:
	//   * my_topic/0 -> leader0 serves 4 messages
	//   * my_topic/1 -> leader1 serves 0 messages

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 4; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 2")
	// Stage 2:
	//   * leader0 says that it is no longer serving my_topic/0
	//   * seedBroker says that leader1 is serving my_topic/0 now

	// seed broker tells that the new partition 0 leader is leader1
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	// leader0 says it is no longer the leader of partition 0
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse),
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 3")
	// Stage 3:
	//   * my_topic/0 -> leader1 serves 3 messages
	//   * my_topic/1 -> leader1 serves 8 messages

	// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
	mockFetchResponse2 := NewMockFetchResponse(t, 2)
	for i := 4; i < 7; i++ {
		mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	for i := 0; i < 8; i++ {
		mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse2,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 4")
	// Stage 4:
	//   * my_topic/0 -> leader1 serves 3 messages
	//   * my_topic/1 -> leader1 says that it is no longer the leader
	//   * seedBroker says that leader0 is the new leader for my_topic/1

	// metadata assigns partition 0 to leader1 and partition 1 to leader0
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader0.BrokerID()),
	})

	// leader1 provides three more messages on partition 0, and says it is no
	// longer the leader of partition 1
	mockFetchResponse3 := NewMockFetchResponse(t, 3).
		SetMessage("my_topic", 0, int64(7), testMsg).
		SetMessage("my_topic", 0, int64(8), testMsg).
		SetMessage("my_topic", 0, int64(9), testMsg)
	fetchResponse4 := new(FetchResponse)
	fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
	})

	// leader0 provides two messages on partition 1
	mockFetchResponse4 := NewMockFetchResponse(t, 2)
	for i := 8; i < 10; i++ {
		mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse4,
	})

	wg.Wait()
	safeClose(t, master)
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
}

// When two partitions have the same broker as the leader, if one partition
// consumer channel buffer is full then that does not affect the other
// consumer's ability to read messages.
func TestConsumerInterleavedClose(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()).
			SetLeader("my_topic", 1, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 1000).
			SetOffset("my_topic", 0, OffsetNewest, 1100).
			SetOffset("my_topic", 1, OffsetOldest, 2000).
			SetOffset("my_topic", 1, OffsetNewest, 2100),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 1000, testMsg).
			SetMessage("my_topic", 0, 1001, testMsg).
			SetMessage("my_topic", 0, 1002, testMsg).
			SetMessage("my_topic", 1, 2000, testMsg),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then: we can read from partition 0 even if nobody reads from partition 1
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c0.Messages(), 1001)
	assertMessageOffset(t, <-c0.Messages(), 1002)

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
}

func TestConsumerBounceWithReferenceOpen(t *testing.T) {
	broker0 := NewMockBroker(t, 0)
	broker0Addr := broker0.Addr()
	broker1 := NewMockBroker(t, 1)

	mockMetadataResponse := NewMockMetadataResponse(t).
		SetBroker(broker0.Addr(), broker0.BrokerID()).
		SetBroker(broker1.Addr(), broker1.BrokerID()).
		SetLeader("my_topic", 0, broker0.BrokerID()).
		SetLeader("my_topic", 1, broker1.BrokerID())

	mockOffsetResponse := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 1000).
		SetOffset("my_topic", 0, OffsetNewest, 1100).
		SetOffset("my_topic", 1, OffsetOldest, 2000).
		SetOffset("my_topic", 1, OffsetNewest, 2100)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
		mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse,
		"FetchRequest":  mockFetchResponse,
	})
	broker1.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": mockMetadataResponse,
		"OffsetRequest":   mockOffsetResponse,
		"FetchRequest":    mockFetchResponse,
	})

	config := NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.ChannelBufferSize = 1

	master, err := NewConsumer([]string{broker1.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// Read messages from both partitions to make sure that both brokers
	// operate normally.
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c1.Messages(), 2000)

	// Simulate a broker shutdown. Note that the metadata response does not
	// change, that is the leadership does not move to another broker, so the
	// partition consumer will keep retrying to restore the connection to the
	// broker.
	broker0.Close()

	// Make sure that while the partition/0 leader is down, consumer/partition/1
	// is capable of pulling messages from broker1.
	for i := 1; i < 7; i++ {
		offset := (<-c1.Messages()).Offset
		if offset != int64(2000+i) {
			t.Errorf("Expected offset %d from consumer/partition/1, got %d", int64(2000+i), offset)
		}
	}

	// Bring broker0 back into service.
	broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	// Read the rest of the messages from both partitions.
	for i := 7; i < 10; i++ {
		assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
	}
	for i := 1; i < 10; i++ {
		assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
	}

	select {
	case <-c0.Errors():
	default:
		t.Errorf("Partition consumer should have detected broker restart")
	}

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
	broker1.Close()
}

func TestConsumerOffsetOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 2)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 2345),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then
	if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}

	safeClose(t, master)
	broker0.Close()
}

func TestConsumerExpiryTicker(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse1 := &FetchResponse{}
	for i := 1; i <= 8; i++ {
		fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
	}
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 1),
		"FetchRequest": NewMockSequence(fetchResponse1),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	config.Consumer.MaxProcessingTime = 10 * time.Millisecond
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages with offsets 1 through 8 are read
	for i := 1; i <= 8; i++ {
		assertMessageOffset(t, <-consumer.Messages(), int64(i))
		time.Sleep(2 * time.Millisecond)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
	if msg.Offset != expectedOffset {
		t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
	}
}
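
// safeClose and the various mocks used above (NewMockBroker,
// NewMockFetchResponse, MockResponse, and friends) are shared test helpers
// defined elsewhere in the package. As a minimal, hypothetical sketch,
// assuming the helper only needs to close the value and report a failed
// Close to the test, safeClose could look like:
//
//	func safeClose(t testing.TB, c io.Closer) {
//		if err := c.Close(); err != nil {
//			t.Error(err)
//		}
//	}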

// This example shows how to use the consumer to read messages
// from a single partition.
func ExampleConsumer() {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	consumed := 0
ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
			consumed++
		case <-signals:
			break ConsumerLoop
		}
	}

	log.Printf("Consumed: %d\n", consumed)
}