consumer_test.go

package sarama

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"testing"
	"time"
)

var testMsg = StringEncoder("Foo")

// If a particular offset is provided then messages are consumed starting from
// that offset.
func TestConsumerOffsetManual(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 2345),
		"FetchRequest": mockFetchResponse,
	})

	// When
	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 1234)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages starting from offset 1234 are consumed.
	for i := 0; i < 10; i++ {
		select {
		case message := <-consumer.Messages():
			assertMessageOffset(t, message, int64(i+1234))
		case err := <-consumer.Errors():
			t.Error(err)
		}
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If `OffsetNewest` is passed as the initial offset then the first consumed
// message corresponds to the offset that the broker claims to be the newest
// in its metadata response.
func TestConsumerOffsetNewest(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 10).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 9, testMsg).
			SetMessage("my_topic", 0, 10, testMsg).
			SetMessage("my_topic", 0, 11, testMsg).
			SetHighWaterMark("my_topic", 0, 14),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-consumer.Messages(), 10)
	if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
		t.Errorf("Expected high water mark offset 14, found %d", hwmo)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// It is possible to close a partition consumer and then re-create it.
func TestConsumerRecreate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 10, testMsg),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 10)

	// When
	safeClose(t, pc)
	pc, err = c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-pc.Messages(), 10)

	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// An attempt to consume the same partition twice should fail.
func TestConsumerDuplicate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc1, err := c.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc2, err := c.ConsumePartition("my_topic", 0, 0)

	// Then
	if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
		t.Fatal("A partition cannot be consumed twice at the same time")
	}

	safeClose(t, pc1)
	safeClose(t, c)
	broker0.Close()
}

// If the consumer fails to refresh metadata it keeps retrying at the
// frequency specified by `Config.Consumer.Retry.Backoff`.
func TestConsumerLeaderRefreshError(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)

	// Stage 1: my_topic/0 served by broker0
	Logger.Printf("    STAGE 1")

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 200 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
	// but the requests to retrieve metadata fail with network timeout.
	Logger.Printf("    STAGE 2")

	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})

	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Stage 3: finally the metadata returned by broker0 says that broker1 is
	// the new leader for my_topic/0. Consumption resumes.
	Logger.Printf("    STAGE 3")

	broker1 := NewMockBroker(t, 101)
	broker1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 124, testMsg),
	})
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetBroker(broker1.Addr(), broker1.BrokerID()).
			SetLeader("my_topic", 0, broker1.BrokerID()),
	})

	assertMessageOffset(t, <-pc.Messages(), 124)

	safeClose(t, pc)
	safeClose(t, c)
	broker1.Close()
	broker0.Close()
}

func TestConsumerInvalidTopic(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)

	// Then
	if pc != nil || err != ErrUnknownTopicOrPartition {
		t.Errorf("Should fail with ErrUnknownTopicOrPartition, err=%v", err)
	}

	safeClose(t, c)
	broker0.Close()
}

// Nothing bad happens if a partition consumer is closed while it has no
// leader assigned.
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// broker0 says that it is no longer the leader for my_topic/0, but the
	// requests to retrieve metadata fail with network timeout.
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})

	// When
	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Then: the partition consumer can be closed without any problem.
	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// If the initial offset passed on partition consumer creation is out of the
// actual offset range for the partition, then the partition consumer stops
// immediately, closing its output channels.
func TestConsumerShutsDownOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockWrapper(fetchResponse),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 101)
	if err != nil {
		t.Fatal(err)
	}

	// Then: consumer should shut down, closing its messages and errors channels.
	if _, ok := <-consumer.Messages(); ok {
		t.Error("Expected the consumer to shut down")
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If a fetch response contains messages with offsets that are smaller than
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
	// Given
	legacyFetchResponse := &FetchResponse{}
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
	newFetchResponse := &FetchResponse{Version: 4}
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4)
	newFetchResponse.SetLastStableOffset("my_topic", 0, 4)
	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
		var offsetResponseVersion int16
		cfg := NewConfig()
		if fetchResponse1.Version >= 4 {
			cfg.Version = V0_11_0_0
			offsetResponseVersion = 1
		}

		broker0 := NewMockBroker(t, 0)
		fetchResponse2 := &FetchResponse{}
		fetchResponse2.Version = fetchResponse1.Version
		fetchResponse2.AddError("my_topic", 0, ErrNoError)
		broker0.SetHandlerByMap(map[string]MockResponse{
			"MetadataRequest": NewMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()),
			"OffsetRequest": NewMockOffsetResponse(t).
				SetVersion(offsetResponseVersion).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0),
			"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
		})

		master, err := NewConsumer([]string{broker0.Addr()}, cfg)
		if err != nil {
			t.Fatal(err)
		}

		// When
		consumer, err := master.ConsumePartition("my_topic", 0, 3)
		if err != nil {
			t.Fatal(err)
		}

		// Then: messages with offsets 1 and 2 are not returned even though
		// they are present in the response.
		assertMessageOffset(t, <-consumer.Messages(), 3)
		assertMessageOffset(t, <-consumer.Messages(), 4)

		safeClose(t, consumer)
		safeClose(t, master)
		broker0.Close()
	}
}

func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) {
	// Given
	fetchResponse1 := &FetchResponse{Version: 4}
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)

	cfg := NewConfig()
	cfg.Version = V0_11_0_0

	broker0 := NewMockBroker(t, 0)
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.Version = 4
	fetchResponse2.AddError("my_topic", 0, ErrNoError)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetVersion(1).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 0),
		"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, cfg)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 1)
	if err != nil {
		t.Fatal(err)
	}

	assertMessageOffset(t, <-consumer.Messages(), 1)
	assertMessageOffset(t, <-consumer.Messages(), 2)

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// It is fine if the offsets of fetched messages are not sequential (they must
// still be strictly increasing, though).
func TestConsumerNonSequentialOffsets(t *testing.T) {
	// Given
	legacyFetchResponse := &FetchResponse{}
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
	newFetchResponse := &FetchResponse{Version: 4}
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11)
	newFetchResponse.SetLastStableOffset("my_topic", 0, 11)
	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
		var offsetResponseVersion int16
		cfg := NewConfig()
		if fetchResponse1.Version >= 4 {
			cfg.Version = V0_11_0_0
			offsetResponseVersion = 1
		}

		broker0 := NewMockBroker(t, 0)
		fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version}
		fetchResponse2.AddError("my_topic", 0, ErrNoError)
		broker0.SetHandlerByMap(map[string]MockResponse{
			"MetadataRequest": NewMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()),
			"OffsetRequest": NewMockOffsetResponse(t).
				SetVersion(offsetResponseVersion).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0),
			"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
		})

		master, err := NewConsumer([]string{broker0.Addr()}, cfg)
		if err != nil {
			t.Fatal(err)
		}

		// When
		consumer, err := master.ConsumePartition("my_topic", 0, 3)
		if err != nil {
			t.Fatal(err)
		}

		// Then: the messages with non-sequential offsets 5, 7 and 11 are all
		// returned.
		assertMessageOffset(t, <-consumer.Messages(), 5)
		assertMessageOffset(t, <-consumer.Messages(), 7)
		assertMessageOffset(t, <-consumer.Messages(), 11)

		safeClose(t, consumer)
		safeClose(t, master)
		broker0.Close()
	}
}

// If leadership for a partition changes then the consumer resolves the new
// leader and switches to it.
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := NewMockBroker(t, 10)
	leader0 := NewMockBroker(t, 0)
	leader1 := NewMockBroker(t, 1)

	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(leader0.Addr(), leader0.BrokerID()).
			SetBroker(leader1.Addr(), leader1.BrokerID()).
			SetLeader("my_topic", 0, leader0.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	mockOffsetResponse1 := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 0).
		SetOffset("my_topic", 0, OffsetNewest, 1000).
		SetOffset("my_topic", 1, OffsetOldest, 0).
		SetOffset("my_topic", 1, OffsetNewest, 1000)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})
	leader1.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})

	// launch test goroutines
	config := NewConfig()
	config.Consumer.Retry.Backoff = 50 * time.Millisecond
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// we expect to end up (eventually) consuming exactly ten messages on each
	// partition
	var wg sync.WaitGroup
	for i := int32(0); i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", i, 0)
		if err != nil {
			t.Error(err)
		}

		go func(c PartitionConsumer) {
			for err := range c.Errors() {
				t.Error(err)
			}
		}(consumer)

		wg.Add(1)
		go func(partition int32, c PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(i, consumer)
	}

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 1")
	// Stage 1:
	//   * my_topic/0 -> leader0 serves 4 messages
	//   * my_topic/1 -> leader1 serves 0 messages
	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 4; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 2")
	// Stage 2:
	//   * leader0 says that it is no longer serving my_topic/0
	//   * seedBroker says that leader1 is now the leader for my_topic/0
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse),
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 3")
	// Stage 3:
	//   * my_topic/0 -> leader1 serves 3 messages
	//   * my_topic/1 -> leader1 serves 8 messages
	mockFetchResponse2 := NewMockFetchResponse(t, 2)
	for i := 4; i < 7; i++ {
		mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	for i := 0; i < 8; i++ {
		mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse2,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 4")
	// Stage 4:
	//   * my_topic/0 -> leader1 serves 3 more messages
	//   * my_topic/1 -> leader1 says that it is no longer the leader
	//   * seedBroker says that leader0 is the new leader for my_topic/1
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader0.BrokerID()),
	})

	// leader1 provides three more messages on partition 0 and says that it is
	// no longer the leader of partition 1
	mockFetchResponse3 := NewMockFetchResponse(t, 3).
		SetMessage("my_topic", 0, int64(7), testMsg).
		SetMessage("my_topic", 0, int64(8), testMsg).
		SetMessage("my_topic", 0, int64(9), testMsg)
	fetchResponse4 := new(FetchResponse)
	fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
	})

	// leader0 provides two messages on partition 1
	mockFetchResponse4 := NewMockFetchResponse(t, 2)
	for i := 8; i < 10; i++ {
		mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse4,
	})

	wg.Wait()
	safeClose(t, master)
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
}

// When two partitions have the same broker as the leader, a full channel
// buffer on one partition consumer does not affect the other consumer's
// ability to read messages.
func TestConsumerInterleavedClose(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()).
			SetLeader("my_topic", 1, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 1000).
			SetOffset("my_topic", 0, OffsetNewest, 1100).
			SetOffset("my_topic", 1, OffsetOldest, 2000).
			SetOffset("my_topic", 1, OffsetNewest, 2100),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 1000, testMsg).
			SetMessage("my_topic", 0, 1001, testMsg).
			SetMessage("my_topic", 0, 1002, testMsg).
			SetMessage("my_topic", 1, 2000, testMsg),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then: we can read from partition 0 even if nobody reads from
	// partition 1
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c0.Messages(), 1001)
	assertMessageOffset(t, <-c0.Messages(), 1002)

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
}

func TestConsumerBounceWithReferenceOpen(t *testing.T) {
	broker0 := NewMockBroker(t, 0)
	broker0Addr := broker0.Addr()
	broker1 := NewMockBroker(t, 1)

	mockMetadataResponse := NewMockMetadataResponse(t).
		SetBroker(broker0.Addr(), broker0.BrokerID()).
		SetBroker(broker1.Addr(), broker1.BrokerID()).
		SetLeader("my_topic", 0, broker0.BrokerID()).
		SetLeader("my_topic", 1, broker1.BrokerID())

	mockOffsetResponse := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 1000).
		SetOffset("my_topic", 0, OffsetNewest, 1100).
		SetOffset("my_topic", 1, OffsetOldest, 2000).
		SetOffset("my_topic", 1, OffsetNewest, 2100)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
		mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse,
		"FetchRequest":  mockFetchResponse,
	})
	broker1.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": mockMetadataResponse,
		"OffsetRequest":   mockOffsetResponse,
		"FetchRequest":    mockFetchResponse,
	})

	config := NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.ChannelBufferSize = 1
	master, err := NewConsumer([]string{broker1.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// Read messages from both partitions to make sure that both brokers
	// operate normally.
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c1.Messages(), 2000)

	// Simulate a broker shutdown. Note that the metadata response does not
	// change, that is the leadership does not move to another broker, so the
	// partition consumer will keep retrying to restore the connection with
	// the broker.
	broker0.Close()

	// Make sure that while the partition/0 leader is down, consumer/partition/1
	// is capable of pulling messages from broker1.
	for i := 1; i < 7; i++ {
		offset := (<-c1.Messages()).Offset
		if offset != int64(2000+i) {
			t.Errorf("Expected offset %d from consumer/partition/1, got %d", int64(2000+i), offset)
		}
	}

	// Bring broker0 back to service.
	broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	// Read the rest of the messages from both partitions.
	for i := 7; i < 10; i++ {
		assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
	}
	for i := 1; i < 10; i++ {
		assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
	}

	select {
	case <-c0.Errors():
	default:
		t.Errorf("Partition consumer should have detected broker restart")
	}

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
	broker1.Close()
}

func TestConsumerOffsetOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 2)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 2345),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then
	if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}

	safeClose(t, master)
	broker0.Close()
}

func TestConsumerExpiryTicker(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse1 := &FetchResponse{}
	for i := 1; i <= 8; i++ {
		fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
	}
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 1),
		"FetchRequest": NewMockSequence(fetchResponse1),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	config.Consumer.MaxProcessingTime = 10 * time.Millisecond
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages with offsets 1 through 8 are read
	for i := 1; i <= 8; i++ {
		assertMessageOffset(t, <-consumer.Messages(), int64(i))
		time.Sleep(2 * time.Millisecond)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
	if msg.Offset != expectedOffset {
		t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
	}
}

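// safeClose is used throughout this file but is defined in the package's
// shared test utilities rather than here. A minimal sketch of what it is
// assumed to do (close a resource and fail the test on error):
//
//	func safeClose(t testing.TB, c io.Closer) {
//		if err := c.Close(); err != nil {
//			t.Error(err)
//		}
//	}
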
// This example shows how to use the consumer to read messages
// from a single partition.
func ExampleConsumer() {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	consumed := 0
ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
			consumed++
		case <-signals:
			break ConsumerLoop
		}
	}

	log.Printf("Consumed: %d\n", consumed)
}
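
// A minimal sketch of the same loop with Consumer.Return.Errors enabled, so
// that broker-side errors arrive on the Errors() channel and must be drained
// by the caller. The broker address and topic name are placeholders.
func ExampleConsumer_returnErrors() {
	config := NewConfig()
	config.Consumer.Return.Errors = true

	consumer, err := NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
		case err := <-partitionConsumer.Errors():
			// With Return.Errors enabled, the Errors() channel must be read
			// to keep the partition consumer from blocking.
			log.Printf("Consume error: %v\n", err)
		case <-signals:
			break ConsumerLoop
		}
	}
}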