// consumer_test.go — unit tests and usage examples for the sarama consumer.
  1. package sarama
  2. import (
  3. "fmt"
  4. "sync"
  5. "testing"
  6. "time"
  7. )
  8. func TestConsumerOffsetManual(t *testing.T) {
  9. seedBroker := newMockBroker(t, 1)
  10. leader := newMockBroker(t, 2)
  11. metadataResponse := new(MetadataResponse)
  12. metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
  13. metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
  14. seedBroker.Returns(metadataResponse)
  15. for i := 0; i <= 10; i++ {
  16. fetchResponse := new(FetchResponse)
  17. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+1234))
  18. leader.Returns(fetchResponse)
  19. }
  20. client, err := NewClient([]string{seedBroker.Addr()}, nil)
  21. if err != nil {
  22. t.Fatal(err)
  23. }
  24. master, err := NewConsumer(client, nil)
  25. if err != nil {
  26. t.Fatal(err)
  27. }
  28. consumer, err := master.ConsumePartition("my_topic", 0, 1234)
  29. if err != nil {
  30. t.Fatal(err)
  31. }
  32. seedBroker.Close()
  33. for i := 0; i < 10; i++ {
  34. select {
  35. case message := <-consumer.Messages():
  36. if message.Offset != int64(i+1234) {
  37. t.Error("Incorrect message offset!")
  38. }
  39. case err := <-consumer.Errors():
  40. t.Error(err)
  41. }
  42. }
  43. safeClose(t, consumer)
  44. safeClose(t, client)
  45. leader.Close()
  46. }
  47. func TestConsumerLatestOffset(t *testing.T) {
  48. seedBroker := newMockBroker(t, 1)
  49. leader := newMockBroker(t, 2)
  50. metadataResponse := new(MetadataResponse)
  51. metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
  52. metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
  53. seedBroker.Returns(metadataResponse)
  54. offsetResponse := new(OffsetResponse)
  55. offsetResponse.AddTopicPartition("my_topic", 0, 0x010101)
  56. leader.Returns(offsetResponse)
  57. fetchResponse := new(FetchResponse)
  58. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), 0x010101)
  59. leader.Returns(fetchResponse)
  60. client, err := NewClient([]string{seedBroker.Addr()}, nil)
  61. if err != nil {
  62. t.Fatal(err)
  63. }
  64. seedBroker.Close()
  65. master, err := NewConsumer(client, nil)
  66. if err != nil {
  67. t.Fatal(err)
  68. }
  69. consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
  70. if err != nil {
  71. t.Fatal(err)
  72. }
  73. leader.Close()
  74. safeClose(t, consumer)
  75. safeClose(t, client)
  76. // we deliver one message, so it should be one higher than we return in the OffsetResponse
  77. if consumer.offset != 0x010102 {
  78. t.Error("Latest offset not fetched correctly:", consumer.offset)
  79. }
  80. }
  81. func TestConsumerFunnyOffsets(t *testing.T) {
  82. // for topics that are compressed and/or compacted (different things!) we have to be
  83. // able to handle receiving offsets that are non-sequential (though still strictly increasing) and
  84. // possibly starting prior to the actual value we requested
  85. seedBroker := newMockBroker(t, 1)
  86. leader := newMockBroker(t, 2)
  87. metadataResponse := new(MetadataResponse)
  88. metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
  89. metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
  90. seedBroker.Returns(metadataResponse)
  91. fetchResponse := new(FetchResponse)
  92. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(1))
  93. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(3))
  94. leader.Returns(fetchResponse)
  95. fetchResponse = new(FetchResponse)
  96. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(5))
  97. leader.Returns(fetchResponse)
  98. client, err := NewClient([]string{seedBroker.Addr()}, nil)
  99. if err != nil {
  100. t.Fatal(err)
  101. }
  102. master, err := NewConsumer(client, nil)
  103. if err != nil {
  104. t.Fatal(err)
  105. }
  106. consumer, err := master.ConsumePartition("my_topic", 0, 2)
  107. message := <-consumer.Messages()
  108. if message.Offset != 3 {
  109. t.Error("Incorrect message offset!")
  110. }
  111. leader.Close()
  112. seedBroker.Close()
  113. safeClose(t, consumer)
  114. safeClose(t, client)
  115. }
  116. func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
  117. // initial setup
  118. seedBroker := newMockBroker(t, 1)
  119. leader0 := newMockBroker(t, 2)
  120. leader1 := newMockBroker(t, 3)
  121. metadataResponse := new(MetadataResponse)
  122. metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
  123. metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
  124. metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
  125. metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
  126. seedBroker.Returns(metadataResponse)
  127. // launch test goroutines
  128. client, err := NewClient([]string{seedBroker.Addr()}, nil)
  129. if err != nil {
  130. t.Fatal(err)
  131. }
  132. master, err := NewConsumer(client, nil)
  133. if err != nil {
  134. t.Fatal(err)
  135. }
  136. // we expect to end up (eventually) consuming exactly ten messages on each partition
  137. var wg sync.WaitGroup
  138. for i := 0; i < 2; i++ {
  139. consumer, err := master.ConsumePartition("my_topic", int32(i), 0)
  140. if err != nil {
  141. t.Error(err)
  142. }
  143. go func(c *PartitionConsumer) {
  144. for err := range c.Errors() {
  145. t.Error(err)
  146. }
  147. }(consumer)
  148. wg.Add(1)
  149. go func(partition int32, c *PartitionConsumer) {
  150. for i := 0; i < 10; i++ {
  151. message := <-consumer.Messages()
  152. if message.Offset != int64(i) {
  153. t.Error("Incorrect message offset!", i, partition, message.Offset)
  154. }
  155. if message.Partition != partition {
  156. t.Error("Incorrect message partition!")
  157. }
  158. }
  159. safeClose(t, consumer)
  160. wg.Done()
  161. }(int32(i), consumer)
  162. }
  163. // leader0 provides first four messages on partition 0
  164. fetchResponse := new(FetchResponse)
  165. for i := 0; i < 4; i++ {
  166. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
  167. }
  168. leader0.Returns(fetchResponse)
  169. // leader0 says no longer leader of partition 0
  170. fetchResponse = new(FetchResponse)
  171. fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
  172. leader0.Returns(fetchResponse)
  173. // metadata assigns both partitions to leader1
  174. metadataResponse = new(MetadataResponse)
  175. metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
  176. metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
  177. seedBroker.Returns(metadataResponse)
  178. time.Sleep(5 * time.Millisecond) // dumbest way to force a particular response ordering
  179. // leader1 provides five messages on partition 1
  180. fetchResponse = new(FetchResponse)
  181. for i := 0; i < 5; i++ {
  182. fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
  183. }
  184. leader1.Returns(fetchResponse)
  185. // leader1 provides three more messages on both partitions
  186. fetchResponse = new(FetchResponse)
  187. for i := 0; i < 3; i++ {
  188. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+4))
  189. fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+5))
  190. }
  191. leader1.Returns(fetchResponse)
  192. // leader1 provides three more messages on partition0, says no longer leader of partition1
  193. fetchResponse = new(FetchResponse)
  194. for i := 0; i < 3; i++ {
  195. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+7))
  196. }
  197. fetchResponse.AddError("my_topic", 1, ErrNotLeaderForPartition)
  198. leader1.Returns(fetchResponse)
  199. // metadata assigns 0 to leader1 and 1 to leader0
  200. metadataResponse = new(MetadataResponse)
  201. metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
  202. metadataResponse.AddTopicPartition("my_topic", 1, leader0.BrokerID(), nil, nil, ErrNoError)
  203. seedBroker.Returns(metadataResponse)
  204. time.Sleep(5 * time.Millisecond) // dumbest way to force a particular response ordering
  205. // leader0 provides two messages on partition 1
  206. fetchResponse = new(FetchResponse)
  207. fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(8))
  208. fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(9))
  209. leader0.Returns(fetchResponse)
  210. // leader0 provides last message on partition 1
  211. fetchResponse = new(FetchResponse)
  212. fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
  213. leader0.Returns(fetchResponse)
  214. // leader1 provides last message on partition 0
  215. fetchResponse = new(FetchResponse)
  216. fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
  217. leader1.Returns(fetchResponse)
  218. wg.Wait()
  219. leader1.Close()
  220. leader0.Close()
  221. seedBroker.Close()
  222. safeClose(t, client)
  223. }
  224. func ExampleConsumerWithSelect() {
  225. client, err := NewClient([]string{"localhost:9092"}, nil)
  226. if err != nil {
  227. panic(err)
  228. } else {
  229. fmt.Println("> connected")
  230. }
  231. defer func() {
  232. if err := client.Close(); err != nil {
  233. panic(err)
  234. }
  235. }()
  236. master, err := NewConsumer(client, nil)
  237. if err != nil {
  238. panic(err)
  239. } else {
  240. fmt.Println("> master consumer ready")
  241. }
  242. consumer, err := master.ConsumePartition("my_topic", 0, 0)
  243. if err != nil {
  244. panic(err)
  245. } else {
  246. fmt.Println("> consumer ready")
  247. }
  248. defer func() {
  249. if err := consumer.Close(); err != nil {
  250. panic(err)
  251. }
  252. }()
  253. msgCount := 0
  254. consumerLoop:
  255. for {
  256. select {
  257. case err := <-consumer.Errors():
  258. panic(err)
  259. case <-consumer.Messages():
  260. msgCount++
  261. case <-time.After(5 * time.Second):
  262. fmt.Println("> timed out")
  263. break consumerLoop
  264. }
  265. }
  266. fmt.Println("Got", msgCount, "messages.")
  267. }
  268. func ExampleConsumerWithGoroutines() {
  269. client, err := NewClient([]string{"localhost:9092"}, nil)
  270. if err != nil {
  271. panic(err)
  272. } else {
  273. fmt.Println("> connected")
  274. }
  275. defer func() {
  276. if err := client.Close(); err != nil {
  277. panic(err)
  278. }
  279. }()
  280. master, err := NewConsumer(client, nil)
  281. if err != nil {
  282. panic(err)
  283. } else {
  284. fmt.Println("> master consumer ready")
  285. }
  286. consumer, err := master.ConsumePartition("my_topic", 0, 0)
  287. if err != nil {
  288. panic(err)
  289. } else {
  290. fmt.Println("> consumer ready")
  291. }
  292. defer func() {
  293. if err := consumer.Close(); err != nil {
  294. panic(err)
  295. }
  296. }()
  297. var (
  298. wg sync.WaitGroup
  299. msgCount int
  300. )
  301. wg.Add(1)
  302. go func() {
  303. defer wg.Done()
  304. for message := range consumer.Messages() {
  305. fmt.Printf("Consumed message with offset %d", message.Offset)
  306. msgCount++
  307. }
  308. }()
  309. wg.Add(1)
  310. go func() {
  311. defer wg.Done()
  312. for err := range consumer.Errors() {
  313. fmt.Println(err)
  314. }
  315. }()
  316. wg.Wait()
  317. fmt.Println("Got", msgCount, "messages.")
  318. }