functional_test.go

package sarama

import (
	"fmt"
	"net"
	"os"
	"sync"
	"testing"
	"time"
)
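
// These functional tests expect a running Kafka broker, by default on
// localhost:6667; set KAFKA_ADDR to point them elsewhere. A typical
// invocation (assuming a broker is listening on the chosen address) is:
//
//	KAFKA_ADDR=localhost:9092 go test -run TestFunc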

const (
	TestBatchSize = 1000
)

var (
	kafkaIsAvailable, kafkaShouldBeAvailable bool
	kafkaAddr                                string
)
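
// init reads the broker address from KAFKA_ADDR (falling back to
// localhost:6667), probes it with a TCP dial, and records whether the tests
// should expect a broker to be available (always the case on CI).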
func init() {
	kafkaAddr = os.Getenv("KAFKA_ADDR")
	if kafkaAddr == "" {
		kafkaAddr = "localhost:6667"
	}

	if c, err := net.Dial("tcp", kafkaAddr); err == nil {
		if err = c.Close(); err == nil {
			kafkaIsAvailable = true
		}
	}

	kafkaShouldBeAvailable = os.Getenv("CI") != ""
}
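
// checkKafkaAvailability skips the calling test when no broker is reachable,
// unless the CI environment variable is set, in which case a missing broker
// is treated as a hard failure.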
func checkKafkaAvailability(t *testing.T) {
	if !kafkaIsAvailable {
		if kafkaShouldBeAvailable {
			t.Fatalf("Kafka broker is not available on %s. Set KAFKA_ADDR to point to a broker at a different address.", kafkaAddr)
		} else {
			t.Skipf("Kafka broker is not available on %s. Set KAFKA_ADDR to point to a broker at a different address.", kafkaAddr)
		}
	}
}
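
// TestFuncConnectionFailure verifies that the client gives up with
// ErrOutOfBrokers when no broker is listening on the given address.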
func TestFuncConnectionFailure(t *testing.T) {
	config := NewConfig()
	config.Metadata.Retries = 1

	_, err := NewClient([]string{"localhost:9000"}, config)
	if err != ErrOutOfBrokers {
		t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
	}
}
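
// The TestFuncProducing* variants below all drive testProducingMessages with
// different producer configurations (compression, required acks, flushing).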
func TestFuncProducing(t *testing.T) {
	config := NewConfig()
	testProducingMessages(t, config)
}

func TestFuncProducingGzip(t *testing.T) {
	config := NewConfig()
	config.Producer.Compression = CompressionGZIP
	testProducingMessages(t, config)
}

func TestFuncProducingSnappy(t *testing.T) {
	config := NewConfig()
	config.Producer.Compression = CompressionSnappy
	testProducingMessages(t, config)
}

func TestFuncProducingNoResponse(t *testing.T) {
	config := NewConfig()
	config.Producer.RequiredAcks = NoResponse
	testProducingMessages(t, config)
}

func TestFuncProducingFlushing(t *testing.T) {
	config := NewConfig()
	config.Producer.Flush.Messages = TestBatchSize / 8
	config.Producer.Flush.Frequency = 250 * time.Millisecond
	testProducingMessages(t, config)
}
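
// TestFuncMultiPartitionProduce sends TestBatchSize messages to the
// "multi_partition" topic from concurrent goroutines and waits for every
// one of them to be acknowledged before closing the producer.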
func TestFuncMultiPartitionProduce(t *testing.T) {
	checkKafkaAvailability(t)

	client, err := NewClient([]string{kafkaAddr}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer safeClose(t, client)

	config := NewConfig()
	config.ChannelBufferSize = 20
	config.Producer.Flush.Frequency = 50 * time.Millisecond
	config.Producer.Flush.Messages = 200
	config.Producer.AckSuccesses = true
	producer, err := NewProducer(client, config)
	if err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	wg.Add(TestBatchSize)

	for i := 1; i <= TestBatchSize; i++ {
		go func(i int, w *sync.WaitGroup) {
			defer w.Done()
			msg := &ProducerMessage{Topic: "multi_partition", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
			producer.Input() <- msg
			select {
			case ret := <-producer.Errors():
				// t.Fatal must only be called from the goroutine running the
				// test, so record the failure with t.Error instead.
				t.Error(ret.Err)
			case <-producer.Successes():
			}
		}(i, &wg)
	}

	wg.Wait()
	if err := producer.Close(); err != nil {
		t.Error(err)
	}
}
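
// testProducingMessages produces TestBatchSize messages to the
// "single_partition" topic with the given producer config, waits for all of
// them to be acknowledged, and then consumes the partition to check that the
// messages arrive intact and in order.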
func testProducingMessages(t *testing.T, config *Config) {
	checkKafkaAvailability(t)

	client, err := NewClient([]string{kafkaAddr}, nil)
	if err != nil {
		t.Fatal(err)
	}

	master, err := NewConsumer(client, nil)
	if err != nil {
		t.Fatal(err)
	}
	consumer, err := master.ConsumePartition("single_partition", 0, OffsetNewest)
	if err != nil {
		t.Fatal(err)
	}

	config.Producer.AckSuccesses = true
	producer, err := NewProducer(client, config)
	if err != nil {
		t.Fatal(err)
	}

	expectedResponses := TestBatchSize
	for i := 1; i <= TestBatchSize; {
		msg := &ProducerMessage{Topic: "single_partition", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
		select {
		case producer.Input() <- msg:
			i++
		case ret := <-producer.Errors():
			t.Fatal(ret.Err)
		case <-producer.Successes():
			expectedResponses--
		}
	}
	for expectedResponses > 0 {
		select {
		case ret := <-producer.Errors():
			t.Fatal(ret.Err)
		case <-producer.Successes():
			expectedResponses--
		}
	}
	safeClose(t, producer)

	for i := 1; i <= TestBatchSize; i++ {
		select {
		case <-time.After(10 * time.Second):
			t.Fatal("No more events received in the last 10 seconds.")
		case err := <-consumer.Errors():
			t.Error(err)
		case message := <-consumer.Messages():
			if string(message.Value) != fmt.Sprintf("testing %d", i) {
				t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
			}
		}
	}
	safeClose(t, consumer)
	safeClose(t, client)
}