consumer_test.go

package sarama

import (
	"log"
	"os"
	"os/signal"
	"sync"
	"testing"
	"time"
)
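
// testMsg is the payload served by every mock fetch response in these tests.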
var testMsg = StringEncoder("Foo")

// If a particular offset is provided then messages are consumed starting from
// that offset.
func TestConsumerOffsetManual(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 2345),
		"FetchRequest": mockFetchResponse,
	})

	// When
	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	consumer, err := master.ConsumePartition("my_topic", 0, 1234)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages starting from offset 1234 are consumed.
	for i := 0; i < 10; i++ {
		select {
		case message := <-consumer.Messages():
			assertMessageOffset(t, message, int64(i+1234))
		case err := <-consumer.Errors():
			t.Error(err)
		}
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If `OffsetNewest` is passed as the initial offset then the first consumed
// message indeed corresponds to the offset that the broker claims to be the
// newest in its metadata response.
func TestConsumerOffsetNewest(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 10).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 9, testMsg).
			SetMessage("my_topic", 0, 10, testMsg).
			SetMessage("my_topic", 0, 11, testMsg).
			SetHighWaterMark("my_topic", 0, 14),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-consumer.Messages(), 10)
	if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
		t.Errorf("Expected high water mark offset 14, found %d", hwmo)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// It is possible to close a partition consumer and then create it anew.
func TestConsumerRecreate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 10, testMsg),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 10)

	// When
	safeClose(t, pc)
	pc, err = c.ConsumePartition("my_topic", 0, 10)
	if err != nil {
		t.Fatal(err)
	}

	// Then
	assertMessageOffset(t, <-pc.Messages(), 10)

	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// An attempt to consume the same partition twice should fail.
func TestConsumerDuplicate(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc1, err := c.ConsumePartition("my_topic", 0, 0)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc2, err := c.ConsumePartition("my_topic", 0, 0)

	// Then
	if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
		t.Fatal("A partition cannot be consumed twice at the same time")
	}

	safeClose(t, pc1)
	safeClose(t, c)
	broker0.Close()
}

// If the consumer fails to refresh metadata, it keeps retrying with the
// frequency specified by `Config.Consumer.Retry.Backoff`.
func TestConsumerLeaderRefreshError(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)

	// Stage 1: my_topic/0 served by broker0
	Logger.Printf("    STAGE 1")

	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 200 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
	// but the requests to retrieve metadata fail with network timeout.
	Logger.Printf("    STAGE 2")

	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)

	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})

	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Stage 3: finally the metadata returned by broker0 tells that broker1 is
	// a new leader for my_topic/0. Consumption resumes.
	Logger.Printf("    STAGE 3")

	broker1 := NewMockBroker(t, 101)
	broker1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 124, testMsg),
	})
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetBroker(broker1.Addr(), broker1.BrokerID()).
			SetLeader("my_topic", 0, broker1.BrokerID()),
	})

	assertMessageOffset(t, <-pc.Messages(), 124)

	safeClose(t, pc)
	safeClose(t, c)
	broker1.Close()
	broker0.Close()
}
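
// An attempt to consume a topic that the cluster knows nothing about fails
// with ErrUnknownTopicOrPartition.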
func TestConsumerInvalidTopic(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()),
	})

	c, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)

	// Then
	if pc != nil || err != ErrUnknownTopicOrPartition {
		t.Errorf("Should fail with ErrUnknownTopicOrPartition, got err=%v", err)
	}

	safeClose(t, c)
	broker0.Close()
}

// Nothing bad happens if a partition consumer that has no leader assigned at
// the moment is closed.
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 100)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 123).
			SetOffset("my_topic", 0, OffsetNewest, 1000),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 123, testMsg),
	})

	config := NewConfig()
	config.Net.ReadTimeout = 100 * time.Millisecond
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.Consumer.Return.Errors = true
	config.Metadata.Retry.Max = 0

	c, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	assertMessageOffset(t, <-pc.Messages(), 123)

	// broker0 says that it is no longer the leader for my_topic/0, but the
	// requests to retrieve metadata fail with network timeout.
	fetchResponse2 := &FetchResponse{}
	fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)

	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse2),
	})

	// When
	if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
		t.Errorf("Unexpected error: %v", consErr.Err)
	}

	// Then: the partition consumer can be closed without any problem.
	safeClose(t, pc)
	safeClose(t, c)
	broker0.Close()
}

// If the initial offset passed on partition consumer creation is out of the
// actual offset range for the partition, then the partition consumer stops
// immediately, closing its output channels.
func TestConsumerShutsDownOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 7),
		"FetchRequest": NewMockWrapper(fetchResponse),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 101)
	if err != nil {
		t.Fatal(err)
	}

	// Then: consumer should shut down, closing its messages and errors channels.
	if _, ok := <-consumer.Messages(); ok {
		t.Error("Expected the consumer to shut down")
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}

// If a fetch response contains messages with offsets that are smaller than
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
	// Given
	legacyFetchResponse := &FetchResponse{}
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
	newFetchResponse := &FetchResponse{Version: 4}
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4)
	newFetchResponse.SetLastStableOffset("my_topic", 0, 4)
	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
		var offsetResponseVersion int16
		cfg := NewConfig()
		if fetchResponse1.Version >= 4 {
			cfg.Version = V0_11_0_0
			offsetResponseVersion = 1
		}

		broker0 := NewMockBroker(t, 0)
		fetchResponse2 := &FetchResponse{}
		fetchResponse2.Version = fetchResponse1.Version
		fetchResponse2.AddError("my_topic", 0, ErrNoError)
		broker0.SetHandlerByMap(map[string]MockResponse{
			"MetadataRequest": NewMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()),
			"OffsetRequest": NewMockOffsetResponse(t).
				SetVersion(offsetResponseVersion).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0),
			"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
		})

		master, err := NewConsumer([]string{broker0.Addr()}, cfg)
		if err != nil {
			t.Fatal(err)
		}

		// When
		consumer, err := master.ConsumePartition("my_topic", 0, 3)
		if err != nil {
			t.Fatal(err)
		}

		// Then: messages with offsets 1 and 2 are not returned even though they
		// are present in the response.
		assertMessageOffset(t, <-consumer.Messages(), 3)
		assertMessageOffset(t, <-consumer.Messages(), 4)

		safeClose(t, consumer)
		safeClose(t, master)
		broker0.Close()
	}
}

// It is fine if offsets of fetched messages are not sequential (though they
// must be strictly increasing!).
func TestConsumerNonSequentialOffsets(t *testing.T) {
	// Given
	legacyFetchResponse := &FetchResponse{}
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
	legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
	newFetchResponse := &FetchResponse{Version: 4}
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7)
	newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11)
	newFetchResponse.SetLastStableOffset("my_topic", 0, 11)
	for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
		var offsetResponseVersion int16
		cfg := NewConfig()
		if fetchResponse1.Version >= 4 {
			cfg.Version = V0_11_0_0
			offsetResponseVersion = 1
		}

		broker0 := NewMockBroker(t, 0)
		fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version}
		fetchResponse2.AddError("my_topic", 0, ErrNoError)
		broker0.SetHandlerByMap(map[string]MockResponse{
			"MetadataRequest": NewMockMetadataResponse(t).
				SetBroker(broker0.Addr(), broker0.BrokerID()).
				SetLeader("my_topic", 0, broker0.BrokerID()),
			"OffsetRequest": NewMockOffsetResponse(t).
				SetVersion(offsetResponseVersion).
				SetOffset("my_topic", 0, OffsetNewest, 1234).
				SetOffset("my_topic", 0, OffsetOldest, 0),
			"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
		})

		master, err := NewConsumer([]string{broker0.Addr()}, cfg)
		if err != nil {
			t.Fatal(err)
		}

		// When
		consumer, err := master.ConsumePartition("my_topic", 0, 3)
		if err != nil {
			t.Fatal(err)
		}
		// Then: messages with offsets 5, 7, and 11 are delivered and the gaps
		// in the offset sequence are skipped over.
		assertMessageOffset(t, <-consumer.Messages(), 5)
		assertMessageOffset(t, <-consumer.Messages(), 7)
		assertMessageOffset(t, <-consumer.Messages(), 11)

		safeClose(t, consumer)
		safeClose(t, master)
		broker0.Close()
	}
}

// If partition leadership changes, the consumer resolves the new leader and
// switches to it.
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := NewMockBroker(t, 10)
	leader0 := NewMockBroker(t, 0)
	leader1 := NewMockBroker(t, 1)

	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(leader0.Addr(), leader0.BrokerID()).
			SetBroker(leader1.Addr(), leader1.BrokerID()).
			SetLeader("my_topic", 0, leader0.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	mockOffsetResponse1 := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 0).
		SetOffset("my_topic", 0, OffsetNewest, 1000).
		SetOffset("my_topic", 1, OffsetOldest, 0).
		SetOffset("my_topic", 1, OffsetNewest, 1000)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})
	leader1.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse1,
		"FetchRequest":  NewMockFetchResponse(t, 1),
	})

	// launch test goroutines
	config := NewConfig()
	config.Consumer.Retry.Backoff = 50 * time.Millisecond
	master, err := NewConsumer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// we expect to end up (eventually) consuming exactly ten messages on each partition
	var wg sync.WaitGroup
	for i := int32(0); i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", i, 0)
		if err != nil {
			t.Error(err)
		}

		go func(c PartitionConsumer) {
			for err := range c.Errors() {
				t.Error(err)
			}
		}(consumer)

		wg.Add(1)
		go func(partition int32, c PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(i, consumer)
	}

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 1")
	// Stage 1:
	// * my_topic/0 -> leader0 serves 4 messages
	// * my_topic/1 -> leader1 serves 0 messages

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 4; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 2")
	// Stage 2:
	// * leader0 says that it is no longer serving my_topic/0
	// * seedBroker tells that leader1 is serving my_topic/0 now

	// seed broker tells that the new partition 0 leader is leader1
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader1.BrokerID()),
	})

	// leader0 says no longer leader of partition 0
	fetchResponse := new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockWrapper(fetchResponse),
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 3")
	// Stage 3:
	// * my_topic/0 -> leader1 serves 3 messages
	// * my_topic/1 -> leader1 serves 8 messages

	// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
	mockFetchResponse2 := NewMockFetchResponse(t, 2)
	for i := 4; i < 7; i++ {
		mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
	}
	for i := 0; i < 8; i++ {
		mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse2,
	})

	time.Sleep(50 * time.Millisecond)
	Logger.Printf("    STAGE 4")
	// Stage 4:
	// * my_topic/0 -> leader1 serves 3 messages
	// * my_topic/1 -> leader1 tells that it is no longer the leader
	// * seedBroker tells that leader0 is a new leader for my_topic/1

	// metadata assigns 0 to leader1 and 1 to leader0
	seedBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetLeader("my_topic", 0, leader1.BrokerID()).
			SetLeader("my_topic", 1, leader0.BrokerID()),
	})

	// leader1 provides three more messages on partition 0, and says it is no
	// longer the leader of partition 1
	mockFetchResponse3 := NewMockFetchResponse(t, 3).
		SetMessage("my_topic", 0, int64(7), testMsg).
		SetMessage("my_topic", 0, int64(8), testMsg).
		SetMessage("my_topic", 0, int64(9), testMsg)
	fetchResponse4 := new(FetchResponse)
	fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
	leader1.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
	})

	// leader0 provides two messages on partition 1
	mockFetchResponse4 := NewMockFetchResponse(t, 2)
	for i := 8; i < 10; i++ {
		mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
	}
	leader0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse4,
	})

	wg.Wait()
	safeClose(t, master)
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
}

// When two partitions have the same broker as the leader, a full channel
// buffer on one partition consumer does not affect the ability to read
// messages from the other.
func TestConsumerInterleavedClose(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()).
			SetLeader("my_topic", 1, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 1000).
			SetOffset("my_topic", 0, OffsetNewest, 1100).
			SetOffset("my_topic", 1, OffsetOldest, 2000).
			SetOffset("my_topic", 1, OffsetNewest, 2100),
		"FetchRequest": NewMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 1000, testMsg).
			SetMessage("my_topic", 0, 1001, testMsg).
			SetMessage("my_topic", 0, 1002, testMsg).
			SetMessage("my_topic", 1, 2000, testMsg),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then: we can read from partition 0 even if nobody reads from partition 1
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c0.Messages(), 1001)
	assertMessageOffset(t, <-c0.Messages(), 1002)

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
}
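
// If a broker goes down while one of its partitions is being consumed, the
// other partition consumers keep working, and consumption resumes once the
// broker comes back up on the same address.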
func TestConsumerBounceWithReferenceOpen(t *testing.T) {
	broker0 := NewMockBroker(t, 0)
	broker0Addr := broker0.Addr()
	broker1 := NewMockBroker(t, 1)

	mockMetadataResponse := NewMockMetadataResponse(t).
		SetBroker(broker0.Addr(), broker0.BrokerID()).
		SetBroker(broker1.Addr(), broker1.BrokerID()).
		SetLeader("my_topic", 0, broker0.BrokerID()).
		SetLeader("my_topic", 1, broker1.BrokerID())

	mockOffsetResponse := NewMockOffsetResponse(t).
		SetOffset("my_topic", 0, OffsetOldest, 1000).
		SetOffset("my_topic", 0, OffsetNewest, 1100).
		SetOffset("my_topic", 1, OffsetOldest, 2000).
		SetOffset("my_topic", 1, OffsetNewest, 2100)

	mockFetchResponse := NewMockFetchResponse(t, 1)
	for i := 0; i < 10; i++ {
		mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
		mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
	}

	broker0.SetHandlerByMap(map[string]MockResponse{
		"OffsetRequest": mockOffsetResponse,
		"FetchRequest":  mockFetchResponse,
	})
	broker1.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": mockMetadataResponse,
		"OffsetRequest":   mockOffsetResponse,
		"FetchRequest":    mockFetchResponse,
	})

	config := NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Retry.Backoff = 100 * time.Millisecond
	config.ChannelBufferSize = 1
	master, err := NewConsumer([]string{broker1.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	c0, err := master.ConsumePartition("my_topic", 0, 1000)
	if err != nil {
		t.Fatal(err)
	}

	c1, err := master.ConsumePartition("my_topic", 1, 2000)
	if err != nil {
		t.Fatal(err)
	}

	// Read messages from both partitions to make sure that both brokers
	// operate normally.
	assertMessageOffset(t, <-c0.Messages(), 1000)
	assertMessageOffset(t, <-c1.Messages(), 2000)

	// Simulate broker shutdown. Note that the metadata response does not
	// change, that is, the leadership does not move to another broker, so the
	// partition consumer will keep retrying to restore the connection with
	// the broker.
	broker0.Close()

	// Make sure that while the partition/0 leader is down, consumer/partition/1
	// is capable of pulling messages from broker1.
	for i := 1; i < 7; i++ {
		offset := (<-c1.Messages()).Offset
		if offset != int64(2000+i) {
			t.Errorf("Expected offset %d from consumer/partition/1, got %d", int64(2000+i), offset)
		}
	}

	// Bring broker0 back to service.
	broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"FetchRequest": mockFetchResponse,
	})

	// Read the rest of messages from both partitions.
	for i := 7; i < 10; i++ {
		assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
	}
	for i := 1; i < 10; i++ {
		assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
	}

	select {
	case <-c0.Errors():
	default:
		t.Errorf("Partition consumer should have detected broker restart")
	}

	safeClose(t, c1)
	safeClose(t, c0)
	safeClose(t, master)
	broker0.Close()
	broker1.Close()
}
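
// ConsumePartition rejects an initial offset that falls outside of the
// partition's current offset range.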
func TestConsumerOffsetOutOfRange(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 2)
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 2345),
	})

	master, err := NewConsumer([]string{broker0.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// When/Then
	if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}
	if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
		t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
	}

	safeClose(t, master)
	broker0.Close()
}
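
// Even when message processing is slow relative to Consumer.MaxProcessingTime,
// all messages are delivered in order and none are dropped.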
func TestConsumerExpiryTicker(t *testing.T) {
	// Given
	broker0 := NewMockBroker(t, 0)
	fetchResponse1 := &FetchResponse{}
	for i := 1; i <= 8; i++ {
		fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
	}
	broker0.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker0.Addr(), broker0.BrokerID()).
			SetLeader("my_topic", 0, broker0.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetNewest, 1234).
			SetOffset("my_topic", 0, OffsetOldest, 1),
		"FetchRequest": NewMockSequence(fetchResponse1),
	})

	config := NewConfig()
	config.ChannelBufferSize = 0
	config.Consumer.MaxProcessingTime = 10 * time.Millisecond
	master, err := NewConsumer([]string{broker0.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// When
	consumer, err := master.ConsumePartition("my_topic", 0, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Then: messages with offsets 1 through 8 are read
	for i := 1; i <= 8; i++ {
		assertMessageOffset(t, <-consumer.Messages(), int64(i))
		time.Sleep(2 * time.Millisecond)
	}

	safeClose(t, consumer)
	safeClose(t, master)
	broker0.Close()
}
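
// assertMessageOffset fails the test if the consumed message does not carry
// the expected offset.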
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
	if msg.Offset != expectedOffset {
		t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
	}
}

// This example shows how to use the consumer to read messages
// from a single partition.
func ExampleConsumer() {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	consumed := 0
ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
			consumed++
		case <-signals:
			break ConsumerLoop
		}
	}

	log.Printf("Consumed: %d\n", consumed)
}