functional_test.go

package sarama

import (
	"fmt"
	"net"
	"os"
	"sync"
	"testing"
	"time"
)

const (
	TestBatchSize = 1000
)

var (
	kafkaIsAvailable, kafkaShouldBeAvailable bool
	kafkaAddr                                string
)
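
// init probes the broker named by KAFKA_ADDR (default localhost:6667) so the
// functional tests can decide whether to run, skip, or fail outright.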
func init() {
	kafkaAddr = os.Getenv("KAFKA_ADDR")
	if kafkaAddr == "" {
		kafkaAddr = "localhost:6667"
	}

	if c, err := net.Dial("tcp", kafkaAddr); err == nil {
		if err = c.Close(); err == nil {
			kafkaIsAvailable = true
		}
	}

	kafkaShouldBeAvailable = os.Getenv("CI") != ""
}
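
// checkKafkaAvailability skips the test when no broker is reachable, unless we
// are running in CI, where a missing broker is treated as a hard failure.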
func checkKafkaAvailability(t *testing.T) {
	if !kafkaIsAvailable {
		if kafkaShouldBeAvailable {
			t.Fatalf("Kafka broker is not available on %s. Set KAFKA_ADDR to connect to Kafka at a different location.", kafkaAddr)
		} else {
			t.Skipf("Kafka broker is not available on %s. Set KAFKA_ADDR to connect to Kafka at a different location.", kafkaAddr)
		}
	}
}
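
// TestFuncConnectionFailure expects ErrOutOfBrokers when no broker is
// listening on the given address.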
func TestFuncConnectionFailure(t *testing.T) {
	config := NewConfig()
	config.Metadata.Retry.Max = 1

	_, err := NewClient([]string{"localhost:9000"}, config)
	if err != ErrOutOfBrokers {
		t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
	}
}
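
// The tests below exercise testProducingMessages with different producer
// configurations: the defaults, gzip and snappy compression, fire-and-forget
// acks, and explicit flush settings.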
func TestFuncProducing(t *testing.T) {
	config := NewConfig()
	testProducingMessages(t, config)
}

func TestFuncProducingGzip(t *testing.T) {
	config := NewConfig()
	config.Producer.Compression = CompressionGZIP
	testProducingMessages(t, config)
}

func TestFuncProducingSnappy(t *testing.T) {
	config := NewConfig()
	config.Producer.Compression = CompressionSnappy
	testProducingMessages(t, config)
}

func TestFuncProducingNoResponse(t *testing.T) {
	config := NewConfig()
	config.Producer.RequiredAcks = NoResponse
	testProducingMessages(t, config)
}

func TestFuncProducingFlushing(t *testing.T) {
	config := NewConfig()
	config.Producer.Flush.Messages = TestBatchSize / 8
	config.Producer.Flush.Frequency = 250 * time.Millisecond
	testProducingMessages(t, config)
}
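
// TestFuncMultiPartitionProduce sends TestBatchSize messages to the
// "multi_partition" topic from concurrent goroutines and waits until every
// one of them has been acknowledged.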
func TestFuncMultiPartitionProduce(t *testing.T) {
	checkKafkaAvailability(t)

	config := NewConfig()
	config.ChannelBufferSize = 20
	config.Producer.Flush.Frequency = 50 * time.Millisecond
	config.Producer.Flush.Messages = 200
	config.Producer.ReturnSuccesses = true
	producer, err := NewProducer([]string{kafkaAddr}, config)
	if err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	wg.Add(TestBatchSize)

	for i := 1; i <= TestBatchSize; i++ {
		go func(i int, w *sync.WaitGroup) {
			defer w.Done()
			msg := &ProducerMessage{Topic: "multi_partition", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
			producer.Input() <- msg
			select {
			case ret := <-producer.Errors():
				// t.Fatal must only be called from the test goroutine, so
				// report the failure with t.Error here instead.
				t.Error(ret.Err)
			case <-producer.Successes():
			}
		}(i, &wg)
	}

	wg.Wait()
	if err := producer.Close(); err != nil {
		t.Error(err)
	}
}
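
// testProducingMessages produces TestBatchSize messages to the
// "single_partition" topic and then consumes them back, verifying order and
// content. safeClose is a shared test helper defined elsewhere in the package.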
func testProducingMessages(t *testing.T, config *Config) {
	checkKafkaAvailability(t)

	config.Producer.ReturnSuccesses = true
	config.Consumer.ReturnErrors = true

	client, err := NewClient([]string{kafkaAddr}, config)
	if err != nil {
		t.Fatal(err)
	}

	master, err := NewConsumerFromClient(client)
	if err != nil {
		t.Fatal(err)
	}
	consumer, err := master.ConsumePartition("single_partition", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}

	producer, err := NewProducerFromClient(client)
	if err != nil {
		t.Fatal(err)
	}

	// Produce the full batch, counting successes as they arrive so we know how
	// many acknowledgements are still outstanding once everything is queued.
	expectedResponses := TestBatchSize
	for i := 1; i <= TestBatchSize; {
		msg := &ProducerMessage{Topic: "single_partition", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
		select {
		case producer.Input() <- msg:
			i++
		case ret := <-producer.Errors():
			t.Fatal(ret.Err)
		case <-producer.Successes():
			expectedResponses--
		}
	}
	for expectedResponses > 0 {
		select {
		case ret := <-producer.Errors():
			t.Fatal(ret.Err)
		case <-producer.Successes():
			expectedResponses--
		}
	}
	safeClose(t, producer)

	// Consume the batch back and check that each message arrives in order with
	// the expected payload.
	for i := 1; i <= TestBatchSize; i++ {
		select {
		case <-time.After(10 * time.Second):
			t.Fatal("No events received in the last 10 seconds.")
		case err := <-consumer.Errors():
			t.Error(err)
		case message := <-consumer.Messages():
			if string(message.Value) != fmt.Sprintf("testing %d", i) {
				t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
			}
		}
	}
	safeClose(t, consumer)
	safeClose(t, client)
}