consumer_test.go

package sarama

import (
	"fmt"
	"sync"
	"testing"
	"time"
)

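// TestDefaultConsumerConfigValidates checks that a freshly constructed
// ConsumerConfig passes Validate without any modification.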
func TestDefaultConsumerConfigValidates(t *testing.T) {
	config := NewConsumerConfig()
	if err := config.Validate(); err != nil {
		t.Error(err)
	}
}

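// TestDefaultPartitionConsumerConfigValidates checks that a freshly constructed
// PartitionConsumerConfig passes Validate without any modification.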
func TestDefaultPartitionConsumerConfigValidates(t *testing.T) {
	config := NewPartitionConsumerConfig()
	if err := config.Validate(); err != nil {
		t.Error(err)
	}
}

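// TestConsumerOffsetManual consumes from a mock broker starting at a manually
// specified offset (1234) and verifies that the delivered messages carry
// sequential offsets from that starting point.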
func TestConsumerOffsetManual(t *testing.T) {
	seedBroker := NewMockBroker(t, 1)
	leader := NewMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, NoError)
	seedBroker.Returns(metadataResponse)

	for i := 0; i <= 10; i++ {
		fetchResponse := new(FetchResponse)
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+1234))
		leader.Returns(fetchResponse)
	}

	client, err := NewClient("client_id", []string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	master, err := NewConsumer(client, nil)
	if err != nil {
		t.Fatal(err)
	}

	config := NewPartitionConsumerConfig()
	config.OffsetMethod = OffsetMethodManual
	config.OffsetValue = 1234
	consumer, err := master.ConsumePartition("my_topic", 0, config)
	if err != nil {
		t.Fatal(err)
	}
	seedBroker.Close()

	for i := 0; i < 10; i++ {
		event := <-consumer.Messages()
		if event.Offset != int64(i+1234) {
			t.Error("Incorrect message offset!")
		}
	}

	safeClose(t, consumer)
	safeClose(t, client)
	leader.Close()
}

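// TestConsumerLatestOffset starts a consumer with OffsetMethodNewest and
// verifies that, after one message is delivered, the consumer's internal
// offset is one past the offset returned in the broker's OffsetResponse.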
func TestConsumerLatestOffset(t *testing.T) {
	seedBroker := NewMockBroker(t, 1)
	leader := NewMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, NoError)
	seedBroker.Returns(metadataResponse)

	offsetResponse := new(OffsetResponse)
	offsetResponse.AddTopicPartition("my_topic", 0, 0x010101)
	leader.Returns(offsetResponse)

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), 0x010101)
	leader.Returns(fetchResponse)

	client, err := NewClient("client_id", []string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	seedBroker.Close()

	master, err := NewConsumer(client, nil)
	if err != nil {
		t.Fatal(err)
	}

	config := NewPartitionConsumerConfig()
	config.OffsetMethod = OffsetMethodNewest
	consumer, err := master.ConsumePartition("my_topic", 0, config)
	if err != nil {
		t.Fatal(err)
	}
	leader.Close()
	safeClose(t, consumer)
	safeClose(t, client)

	// we deliver one message, so it should be one higher than we return in the OffsetResponse
	if consumer.offset != 0x010102 {
		t.Error("Latest offset not fetched correctly:", consumer.offset)
	}
}

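// TestConsumerFunnyOffsets requests offset 2 from a partition whose log only
// contains messages at offsets 1, 3, and 5, and expects delivery to start at
// offset 3.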
func TestConsumerFunnyOffsets(t *testing.T) {
	// for topics that are compressed and/or compacted (different things!) we have to be
	// able to handle receiving offsets that are non-sequential (though still strictly increasing) and
	// possibly starting prior to the actual value we requested
	seedBroker := NewMockBroker(t, 1)
	leader := NewMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, NoError)
	seedBroker.Returns(metadataResponse)

	fetchResponse := new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(1))
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(3))
	leader.Returns(fetchResponse)

	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(5))
	leader.Returns(fetchResponse)

	client, err := NewClient("client_id", []string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	master, err := NewConsumer(client, nil)
	if err != nil {
		t.Fatal(err)
	}

	config := NewPartitionConsumerConfig()
	config.OffsetMethod = OffsetMethodManual
	config.OffsetValue = 2
	consumer, err := master.ConsumePartition("my_topic", 0, config)
	if err != nil {
		t.Fatal(err)
	}

	// we asked for offset 2, so the message at offset 1 must be skipped and the
	// first delivered message is the one at offset 3
	message := <-consumer.Messages()
	if message.Offset != 3 {
		t.Error("Incorrect message offset!")
	}

	leader.Close()
	seedBroker.Close()
	safeClose(t, consumer)
	safeClose(t, client)
}

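// TestConsumerRebalancingMultiplePartitions consumes ten messages from each of
// two partitions while leadership repeatedly moves between two mock brokers,
// verifying that the consumer follows the updated metadata through each
// rebalance.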
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
	// initial setup
	seedBroker := NewMockBroker(t, 1)
	leader0 := NewMockBroker(t, 2)
	leader1 := NewMockBroker(t, 3)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
	metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, NoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, NoError)
	seedBroker.Returns(metadataResponse)

	// launch test goroutines
	client, err := NewClient("client_id", []string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}

	master, err := NewConsumer(client, nil)
	if err != nil {
		t.Fatal(err)
	}

	config := NewPartitionConsumerConfig()
	config.OffsetMethod = OffsetMethodManual
	config.OffsetValue = 0

	// we expect to end up (eventually) consuming exactly ten messages on each partition
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		consumer, err := master.ConsumePartition("my_topic", int32(i), config)
		if err != nil {
			t.Error(err)
		}
		wg.Add(1)
		go func(partition int32, c *PartitionConsumer) {
			for i := 0; i < 10; i++ {
				message := <-c.Messages()
				if message.Offset != int64(i) {
					t.Error("Incorrect message offset!", i, partition, message.Offset)
				}
				if message.Partition != partition {
					t.Error("Incorrect message partition!")
				}
			}
			safeClose(t, c)
			wg.Done()
		}(int32(i), consumer)
	}

	// leader0 provides the first four messages on partition 0
	fetchResponse := new(FetchResponse)
	for i := 0; i < 4; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
	}
	leader0.Returns(fetchResponse)

	// leader0 says it is no longer the leader of partition 0
	fetchResponse = new(FetchResponse)
	fetchResponse.AddError("my_topic", 0, NotLeaderForPartition)
	leader0.Returns(fetchResponse)

	// metadata assigns both partitions to leader1
	metadataResponse = new(MetadataResponse)
	metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, NoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, NoError)
	seedBroker.Returns(metadataResponse)
	time.Sleep(5 * time.Millisecond) // dumbest way to force a particular response ordering

	// leader1 provides five messages on partition 1
	fetchResponse = new(FetchResponse)
	for i := 0; i < 5; i++ {
		fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
	}
	leader1.Returns(fetchResponse)

	// leader1 provides three more messages on both partitions
	fetchResponse = new(FetchResponse)
	for i := 0; i < 3; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+4))
		fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+5))
	}
	leader1.Returns(fetchResponse)

	// leader1 provides three more messages on partition 0, and says it is no longer the leader of partition 1
	fetchResponse = new(FetchResponse)
	for i := 0; i < 3; i++ {
		fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i+7))
	}
	fetchResponse.AddError("my_topic", 1, NotLeaderForPartition)
	leader1.Returns(fetchResponse)

	// metadata assigns partition 0 to leader1 and partition 1 to leader0
	metadataResponse = new(MetadataResponse)
	metadataResponse.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, NoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader0.BrokerID(), nil, nil, NoError)
	seedBroker.Returns(metadataResponse)
	time.Sleep(5 * time.Millisecond) // dumbest way to force a particular response ordering

	// leader0 provides two messages on partition 1
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(8))
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(9))
	leader0.Returns(fetchResponse)

	// leader0 provides the last message on partition 1
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 1, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
	leader0.Returns(fetchResponse)

	// leader1 provides the last message on partition 0
	fetchResponse = new(FetchResponse)
	fetchResponse.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(10))
	leader1.Returns(fetchResponse)

	wg.Wait()
	leader1.Close()
	leader0.Close()
	seedBroker.Close()
	safeClose(t, client)
}

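// ExampleConsumer shows a basic consumption loop against a single partition:
// connect a client, create a master consumer, consume partition 0 of
// "my_topic", and count messages until five seconds pass without one.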
func ExampleConsumer() {
	client, err := NewClient("my_client", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> connected")
	}
	defer client.Close()

	master, err := NewConsumer(client, nil)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> master consumer ready")
	}

	consumer, err := master.ConsumePartition("my_topic", 0, nil)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> consumer ready")
	}
	defer consumer.Close()

	msgCount := 0
consumerLoop:
	for {
		select {
		case err := <-consumer.Errors():
			panic(err)
		case <-consumer.Messages():
			msgCount++
		case <-time.After(5 * time.Second):
			fmt.Println("> timed out")
			break consumerLoop
		}
	}
	fmt.Println("Got", msgCount, "messages.")
}