functional_test.go 4.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191
  1. package sarama
  2. import (
  3. "fmt"
  4. "net"
  5. "os"
  6. "strings"
  7. "sync"
  8. "testing"
  9. "time"
  10. )
const (
	// TestBatchSize is the number of messages each functional test
	// produces and then expects to consume/acknowledge.
	TestBatchSize = 1000

	// Default broker/zookeeper peer lists for the project's
	// Vagrant-provisioned cluster; used when KAFKA_PEERS is not set.
	VagrantKafkaPeers     = "192.168.100.67:6667,192.168.100.67:6668,192.168.100.67:6669,192.168.100.67:6670,192.168.100.67:6671"
	VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185"
)

var (
	// Set once by init(): whether a TCP connection to kafkaBrokers[0]
	// succeeded, and whether the environment (CI set) demands that it must.
	kafkaIsAvailable, kafkaShouldBeAvailable bool

	// kafkaBrokers is the comma-split KAFKA_PEERS list, falling back to
	// VagrantKafkaPeers when the variable is unset.
	kafkaBrokers []string
)
  20. func init() {
  21. kafkaPeers := os.Getenv("KAFKA_PEERS")
  22. if kafkaPeers == "" {
  23. kafkaPeers = VagrantKafkaPeers
  24. }
  25. kafkaBrokers = strings.Split(kafkaPeers, ",")
  26. if c, err := net.Dial("tcp", kafkaBrokers[0]); err == nil {
  27. if err = c.Close(); err == nil {
  28. kafkaIsAvailable = true
  29. }
  30. }
  31. kafkaShouldBeAvailable = os.Getenv("CI") != ""
  32. }
  33. func checkKafkaAvailability(t *testing.T) {
  34. if !kafkaIsAvailable {
  35. if kafkaShouldBeAvailable {
  36. t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
  37. } else {
  38. t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
  39. }
  40. }
  41. }
  42. func TestFuncConnectionFailure(t *testing.T) {
  43. config := NewConfig()
  44. config.Metadata.Retry.Max = 1
  45. _, err := NewClient([]string{"localhost:9000"}, config)
  46. if err != ErrOutOfBrokers {
  47. t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
  48. }
  49. }
  50. func TestFuncProducing(t *testing.T) {
  51. config := NewConfig()
  52. testProducingMessages(t, config)
  53. }
  54. func TestFuncProducingGzip(t *testing.T) {
  55. config := NewConfig()
  56. config.Producer.Compression = CompressionGZIP
  57. testProducingMessages(t, config)
  58. }
  59. func TestFuncProducingSnappy(t *testing.T) {
  60. config := NewConfig()
  61. config.Producer.Compression = CompressionSnappy
  62. testProducingMessages(t, config)
  63. }
  64. func TestFuncProducingNoResponse(t *testing.T) {
  65. config := NewConfig()
  66. config.Producer.RequiredAcks = NoResponse
  67. testProducingMessages(t, config)
  68. }
  69. func TestFuncProducingFlushing(t *testing.T) {
  70. config := NewConfig()
  71. config.Producer.Flush.Messages = TestBatchSize / 8
  72. config.Producer.Flush.Frequency = 250 * time.Millisecond
  73. testProducingMessages(t, config)
  74. }
  75. func TestFuncMultiPartitionProduce(t *testing.T) {
  76. checkKafkaAvailability(t)
  77. config := NewConfig()
  78. config.ChannelBufferSize = 20
  79. config.Producer.Flush.Frequency = 50 * time.Millisecond
  80. config.Producer.Flush.Messages = 200
  81. config.Producer.Return.Successes = true
  82. producer, err := NewAsyncProducer(kafkaBrokers, config)
  83. if err != nil {
  84. t.Fatal(err)
  85. }
  86. var wg sync.WaitGroup
  87. wg.Add(TestBatchSize)
  88. for i := 1; i <= TestBatchSize; i++ {
  89. go func(i int, w *sync.WaitGroup) {
  90. defer w.Done()
  91. msg := &ProducerMessage{Topic: "multi_partition", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
  92. producer.Input() <- msg
  93. select {
  94. case ret := <-producer.Errors():
  95. t.Fatal(ret.Err)
  96. case <-producer.Successes():
  97. }
  98. }(i, &wg)
  99. }
  100. wg.Wait()
  101. if err := producer.Close(); err != nil {
  102. t.Error(err)
  103. }
  104. }
  105. func testProducingMessages(t *testing.T, config *Config) {
  106. checkKafkaAvailability(t)
  107. config.Producer.Return.Successes = true
  108. config.Consumer.Return.Errors = true
  109. client, err := NewClient(kafkaBrokers, config)
  110. if err != nil {
  111. t.Fatal(err)
  112. }
  113. master, err := NewConsumerFromClient(client)
  114. if err != nil {
  115. t.Fatal(err)
  116. }
  117. consumer, err := master.ConsumePartition("single_partition", 0, OffsetNewest)
  118. if err != nil {
  119. t.Fatal(err)
  120. }
  121. producer, err := NewAsyncProducerFromClient(client)
  122. if err != nil {
  123. t.Fatal(err)
  124. }
  125. expectedResponses := TestBatchSize
  126. for i := 1; i <= TestBatchSize; {
  127. msg := &ProducerMessage{Topic: "single_partition", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
  128. select {
  129. case producer.Input() <- msg:
  130. i++
  131. case ret := <-producer.Errors():
  132. t.Fatal(ret.Err)
  133. case <-producer.Successes():
  134. expectedResponses--
  135. }
  136. }
  137. for expectedResponses > 0 {
  138. select {
  139. case ret := <-producer.Errors():
  140. t.Fatal(ret.Err)
  141. case <-producer.Successes():
  142. expectedResponses--
  143. }
  144. }
  145. safeClose(t, producer)
  146. for i := 1; i <= TestBatchSize; i++ {
  147. select {
  148. case <-time.After(10 * time.Second):
  149. t.Fatal("Not received any more events in the last 10 seconds.")
  150. case err := <-consumer.Errors():
  151. t.Error(err)
  152. case message := <-consumer.Messages():
  153. if string(message.Value) != fmt.Sprintf("testing %d", i) {
  154. t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
  155. }
  156. }
  157. }
  158. safeClose(t, consumer)
  159. safeClose(t, client)
  160. }