utils.go

package sarama

import (
	"bufio"
	"fmt"
	"net"
	"regexp"
)

type none struct{}

// make []int32 sortable so we can sort partition numbers
type int32Slice []int32

func (slice int32Slice) Len() int {
	return len(slice)
}

func (slice int32Slice) Less(i, j int) bool {
	return slice[i] < slice[j]
}

func (slice int32Slice) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

func dupInt32Slice(input []int32) []int32 {
	ret := make([]int32, 0, len(input))
	ret = append(ret, input...)
	return ret
}

func withRecover(fn func()) {
	defer func() {
		handler := PanicHandler
		if handler != nil {
			if err := recover(); err != nil {
				handler(err)
			}
		}
	}()

	fn()
}

func safeAsyncClose(b *Broker) {
	tmp := b // local var prevents clobbering in goroutine
	go withRecover(func() {
		if connected, _ := tmp.Connected(); connected {
			if err := tmp.Close(); err != nil {
				Logger.Println("Error closing broker", tmp.ID(), ":", err)
			}
		}
	})
}
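
// exampleWithRecover is a minimal sketch, not part of the original file,
// showing how withRecover is intended to be used: PanicHandler and Logger
// (declared elsewhere in this package) are assumed; any panic raised inside
// the wrapped function is routed to PanicHandler instead of crashing the
// process.
func exampleWithRecover() {
	PanicHandler = func(err interface{}) {
		Logger.Println("recovered from panic:", err)
	}
	withRecover(func() {
		panic("boom") // caught by the deferred recover and passed to PanicHandler
	})
}
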
// Encoder is a simple interface for any type that can be encoded as an array of bytes
// in order to be sent as the key or value of a Kafka message. Length() is provided as an
// optimization, and must return the same as len() on the result of Encode().
type Encoder interface {
	Encode() ([]byte, error)
	Length() int
}

// make strings and byte slices encodable for convenience so they can be used as keys
// and/or values in kafka messages

// StringEncoder implements the Encoder interface for Go strings so that they can be used
// as the Key or Value in a ProducerMessage.
type StringEncoder string

func (s StringEncoder) Encode() ([]byte, error) {
	return []byte(s), nil
}

func (s StringEncoder) Length() int {
	return len(s)
}

// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
// as the Key or Value in a ProducerMessage.
type ByteEncoder []byte

func (b ByteEncoder) Encode() ([]byte, error) {
	return b, nil
}

func (b ByteEncoder) Length() int {
	return len(b)
}
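
// exampleEncoders is a minimal sketch, not part of the original file, showing
// the two convenience Encoder implementations used as the Key and Value of a
// ProducerMessage (declared elsewhere in this package). Length reports the
// encoded size without allocating; Encode produces the bytes sent to Kafka.
func exampleEncoders() *ProducerMessage {
	return &ProducerMessage{
		Topic: "example-topic",                 // hypothetical topic name
		Key:   StringEncoder("user-42"),        // string used directly as the key
		Value: ByteEncoder([]byte(`{"n": 1}`)), // pre-encoded bytes used as the value
	}
}
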
// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
// reads that trigger syscalls.
type bufConn struct {
	net.Conn
	buf *bufio.Reader
}

func newBufConn(conn net.Conn) *bufConn {
	return &bufConn{
		Conn: conn,
		buf:  bufio.NewReader(conn),
	}
}

func (bc *bufConn) Read(b []byte) (n int, err error) {
	return bc.buf.Read(b)
}
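
// exampleBufConn is a minimal sketch, not part of the original file, showing
// how newBufConn wraps an established connection so that subsequent small
// reads are served from the bufio.Reader rather than hitting the socket (and
// a syscall) each time. The address is a hypothetical parameter.
func exampleBufConn(addr string) (net.Conn, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	// *bufConn embeds net.Conn, so it still satisfies the interface;
	// only Read is overridden to go through the buffer.
	return newBufConn(conn), nil
}
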
// KafkaVersion instances represent versions of the upstream Kafka broker.
type KafkaVersion struct {
	// it's a struct rather than just typing the array directly to make it opaque and stop people
	// generating their own arbitrary versions
	version [4]uint
}

func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
	return KafkaVersion{
		version: [4]uint{major, minor, veryMinor, patch},
	}
}

// IsAtLeast returns true if and only if the version it is called on is
// greater than or equal to the version passed in:
//
//	V1.IsAtLeast(V2) // false
//	V2.IsAtLeast(V1) // true
func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
	for i := range v.version {
		if v.version[i] > other.version[i] {
			return true
		} else if v.version[i] < other.version[i] {
			return false
		}
	}
	return true
}
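
// exampleVersionGate is a minimal sketch, not part of the original file, of
// the typical IsAtLeast pattern: a feature is enabled only when the configured
// broker version is at least the version that introduced it (Kafka 0.11.0
// introduced the v2 record-batch message format, used here as the example).
func exampleVersionGate(v KafkaVersion) string {
	if v.IsAtLeast(V0_11_0_0) {
		return "record batches (v2 message format) supported"
	}
	return "falling back to the legacy message set format"
}
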
// Effective constants defining the supported Kafka versions.
var (
	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)

	SupportedVersions = []KafkaVersion{
		V0_8_2_0,
		V0_8_2_1,
		V0_8_2_2,
		V0_9_0_0,
		V0_9_0_1,
		V0_10_0_0,
		V0_10_0_1,
		V0_10_1_0,
		V0_10_1_1,
		V0_10_2_0,
		V0_10_2_1,
		V0_11_0_0,
		V0_11_0_1,
		V0_11_0_2,
		V1_0_0_0,
		V1_1_0_0,
		V1_1_1_0,
		V2_0_0_0,
		V2_0_1_0,
		V2_1_0_0,
		V2_2_0_0,
		V2_3_0_0,
		V2_4_0_0,
		V2_5_0_0,
	}
	MinVersion = V0_8_2_0
	MaxVersion = V2_5_0_0
)

// ParseKafkaVersion parses the given string and returns the corresponding
// KafkaVersion, or MinVersion and an error if the string is not a valid version.
func ParseKafkaVersion(s string) (KafkaVersion, error) {
	if len(s) < 5 {
		return MinVersion, fmt.Errorf("invalid version `%s`", s)
	}
	var major, minor, veryMinor, patch uint
	var err error
	if s[0] == '0' {
		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
	} else {
		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
	}
	if err != nil {
		return MinVersion, err
	}
	return newKafkaVersion(major, minor, veryMinor, patch), nil
}

func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
	if !regexp.MustCompile(pattern).MatchString(s) {
		return fmt.Errorf("invalid version `%s`", s)
	}
	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
	return err
}

func (v KafkaVersion) String() string {
	if v.version[0] == 0 {
		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
	}

	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
}
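
// exampleParseVersion is a minimal sketch, not part of the original file,
// showing the round trip through ParseKafkaVersion and String. Both 0.x
// ("0.10.2.1") and 1.0+ ("2.5.0") forms are accepted; on bad input the
// function returns MinVersion together with an error. Logger is declared
// elsewhere in this package.
func exampleParseVersion(s string) (KafkaVersion, error) {
	v, err := ParseKafkaVersion(s) // e.g. "2.5.0" or "0.10.2.1"
	if err != nil {
		return MinVersion, fmt.Errorf("could not parse Kafka version %q: %v", s, err)
	}
	Logger.Println("using Kafka version", v.String())
	return v, nil
}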