config.go

package sarama

import (
	"compress/gzip"
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net"
	"regexp"
	"time"

	"github.com/rcrowley/go-metrics"
)

const defaultClientID = "sarama"

var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
	Admin struct {
		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
		Timeout time.Duration
	}

	// Net is the namespace for network-level properties used by the Broker, and
	// shared by the Client/Producer/Consumer.
	Net struct {
		// How many outstanding requests a connection is allowed to have before
		// sending on it blocks (default 5).
		MaxOpenRequests int

		// All three of the below configurations are similar to the
		// `socket.timeout.ms` setting in JVM kafka. All of them default
		// to 30 seconds.
		DialTimeout  time.Duration // How long to wait for the initial connection.
		ReadTimeout  time.Duration // How long to wait for a response.
		WriteTimeout time.Duration // How long to wait for a transmit.

		TLS struct {
			// Whether or not to use TLS when connecting to the broker
			// (defaults to false).
			Enable bool
			// The TLS configuration to use for secure connections if
			// enabled (defaults to nil).
			Config *tls.Config
		}
		// SASL-based authentication with the broker. While there are multiple
		// SASL authentication methods, the current implementation is limited to
		// plaintext (SASL/PLAIN) and OAUTHBEARER authentication.
		SASL struct {
			// Whether or not to use SASL authentication when connecting to the broker
			// (defaults to false).
			Enable bool
			// SASLMechanism is the name of the enabled SASL mechanism.
			// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
			Mechanism SASLMechanism
			// Whether or not to send the Kafka SASL handshake first if enabled
			// (defaults to true). You should only set this to false if you're using
			// a non-Kafka SASL proxy.
			Handshake bool
			// Username and password for SASL/PLAIN authentication.
			User     string
			Password string
			// TokenProvider is a user-defined callback for generating
			// access tokens for SASL/OAUTHBEARER auth. See the
			// AccessTokenProvider interface docs for proper implementation
			// guidelines, and the illustrative sketch after the Config
			// definition below.
			TokenProvider AccessTokenProvider
		}
		// KeepAlive specifies the keep-alive period for an active network connection.
		// If zero, keep-alives are disabled (default is 0: disabled).
		KeepAlive time.Duration

		// LocalAddr is the local address to use when dialing an
		// address. The address must be of a compatible type for the
		// network being dialed.
		// If nil, a local address is automatically chosen.
		LocalAddr net.Addr
	}

	// Metadata is the namespace for metadata management properties used by the
	// Client, and shared by the Producer/Consumer.
	Metadata struct {
		Retry struct {
			// The total number of times to retry a metadata request when the
			// cluster is in the middle of a leader election (default 3).
			Max int
			// How long to wait for leader election to occur before retrying
			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
			Backoff time.Duration
			// Called to compute backoff time dynamically. Useful for implementing
			// more sophisticated backoff strategies. This takes precedence over
			// `Backoff` if set.
			BackoffFunc func(retries, maxRetries int) time.Duration
		}
		// How frequently to refresh the cluster metadata in the background.
		// Defaults to 10 minutes. Set to 0 to disable. Similar to
		// `topic.metadata.refresh.interval.ms` in the JVM version.
		RefreshFrequency time.Duration

		// Whether to maintain a full set of metadata for all topics, or just
		// the minimal set that has been necessary so far. The full set is simpler
		// and usually more convenient, but can take up a substantial amount of
		// memory if you have many topics and partitions. Defaults to true.
		Full bool
	}

	// Producer is the namespace for configuration related to producing messages,
	// used by the Producer.
	Producer struct {
		// The maximum permitted size of a message (defaults to 1000000). Should be
		// set equal to or smaller than the broker's `message.max.bytes`.
		MaxMessageBytes int
		// The level of acknowledgement reliability needed from the broker (defaults
		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
		// JVM producer.
		RequiredAcks RequiredAcks
		// The maximum duration the broker will wait for the receipt of the number
		// of RequiredAcks (defaults to 10 seconds). This is only relevant when
		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
		// millisecond resolution; nanoseconds will be truncated. Equivalent to
		// the JVM producer's `request.timeout.ms` setting.
		Timeout time.Duration
		// The type of compression to use on messages (defaults to no compression).
		// Similar to `compression.codec` setting of the JVM producer.
		Compression CompressionCodec
		// The level of compression to use on messages. The meaning depends
		// on the actual compression type used and defaults to the default
		// compression level for the codec.
		CompressionLevel int
		// Generates partitioners for choosing the partition to send messages to
		// (defaults to hashing the message key). Similar to the `partitioner.class`
		// setting for the JVM producer.
		Partitioner PartitionerConstructor
		// If enabled, the producer will ensure that exactly one copy of each message is
		// written.
		Idempotent bool

		// Return specifies what channels will be populated. If they are set to true,
		// you must read from the respective channels to prevent deadlock. If,
		// however, this config is used to create a `SyncProducer`, both must be set
		// to true and you shall not read from the channels since the producer does
		// this internally.
		Return struct {
			// If enabled, successfully delivered messages will be returned on the
			// Successes channel (default disabled).
			Successes bool
			// If enabled, messages that failed to deliver will be returned on the
			// Errors channel, including the associated error (default enabled).
			Errors bool
		}
		// The following config options control how often messages are batched up and
		// sent to the broker. By default, messages are sent as fast as possible, and
		// all messages received while the current batch is in-flight are placed
		// into the subsequent batch.
		Flush struct {
			// The best-effort number of bytes needed to trigger a flush. Use the
			// global sarama.MaxRequestSize to set a hard upper limit.
			Bytes int
			// The best-effort number of messages needed to trigger a flush. Use
			// `MaxMessages` to set a hard upper limit.
			Messages int
			// The best-effort frequency of flushes. Equivalent to
			// `queue.buffering.max.ms` setting of JVM producer.
			Frequency time.Duration
			// The maximum number of messages the producer will send in a single
			// broker request. Defaults to 0 for unlimited. Similar to
			// `queue.buffering.max.messages` in the JVM producer.
			MaxMessages int
		}

		Retry struct {
			// The total number of times to retry sending a message (default 3).
			// Similar to the `message.send.max.retries` setting of the JVM producer.
			Max int
			// How long to wait for the cluster to settle between retries
			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
			// JVM producer.
			Backoff time.Duration
			// Called to compute backoff time dynamically. Useful for implementing
			// more sophisticated backoff strategies. This takes precedence over
			// `Backoff` if set.
			BackoffFunc func(retries, maxRetries int) time.Duration
		}
	}
	// Consumer is the namespace for configuration related to consuming messages,
	// used by the Consumer.
	Consumer struct {
		// Group is the namespace for configuring consumer groups.
		Group struct {
			Session struct {
				// The timeout used to detect consumer failures when using Kafka's group management facility.
				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
				// If no heartbeats are received by the broker before the expiration of this session timeout,
				// then the broker will remove this consumer from the group and initiate a rebalance.
				// Note that the value must be in the allowable range as configured in the broker configuration
				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s).
				Timeout time.Duration
			}
			Heartbeat struct {
				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
				// to facilitate rebalancing when new consumers join or leave the group.
				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
				// higher than 1/3 of that value.
				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s).
				Interval time.Duration
			}
			Rebalance struct {
				// Strategy for allocating topic partitions to members (default BalanceStrategyRange).
				Strategy BalanceStrategy
				// The maximum allowed time for each worker to join the group once a rebalance has begun.
				// This is basically a limit on the amount of time needed for all tasks to flush any pending
				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
				// the group, which will cause offset commit failures (default 60s).
				Timeout time.Duration

				Retry struct {
					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
					// the load to assign partitions to each consumer. If the set of consumers changes while
					// this assignment is taking place the rebalance will fail and retry. This setting controls
					// the maximum number of attempts before giving up (default 4).
					Max int
					// Backoff time between retries during rebalance (default 2s).
					Backoff time.Duration
				}
			}
			Member struct {
				// Custom metadata to include when joining the group. The user data for all joined members
				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
				// coordinator for the group.
				UserData []byte
			}
		}
		Retry struct {
			// How long to wait after failing to read from a partition before
			// trying again (default 2s).
			Backoff time.Duration
			// Called to compute backoff time dynamically. Useful for implementing
			// more sophisticated backoff strategies. This takes precedence over
			// `Backoff` if set.
			BackoffFunc func(retries int) time.Duration
		}
		// Fetch is the namespace for controlling how many bytes are retrieved by any
		// given request.
		Fetch struct {
			// The minimum number of message bytes to fetch in a request - the broker
			// will wait until at least this many are available. The default is 1,
			// as 0 causes the consumer to spin when no messages are available.
			// Equivalent to the JVM's `fetch.min.bytes`.
			Min int32
			// The default number of message bytes to fetch from the broker in each
			// request (default 1MB). This should be larger than the majority of
			// your messages, or else the consumer will spend a lot of time
			// negotiating sizes and not actually consuming. Similar to the JVM's
			// `fetch.message.max.bytes`.
			Default int32
			// The maximum number of message bytes to fetch from the broker in a
			// single request. Messages larger than this will return
			// ErrMessageTooLarge and will not be consumable, so you must be sure
			// this is at least as large as your largest message. Defaults to 0
			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
			// global `sarama.MaxResponseSize` still applies.
			Max int32
		}
		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
		// bytes to become available before it returns fewer than that anyway. The
		// default is 250ms, since 0 causes the consumer to spin when no events are
		// available. 100-500ms is a reasonable range for most cases. Kafka only
		// supports precision up to milliseconds; nanoseconds will be truncated.
		// Equivalent to the JVM's `fetch.wait.max.ms`.
		MaxWaitTime time.Duration
		// The maximum amount of time the consumer expects a message takes to
		// process for the user. If writing to the Messages channel takes longer
		// than this, that partition will stop fetching more messages until it
		// can proceed again.
		// Note that, since the Messages channel is buffered, the actual grace time is
		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
		// If a message is not written to the Messages channel between two ticks
		// of the expiryTicker then a timeout is detected.
		// Using a ticker instead of a timer to detect timeouts should typically
		// result in many fewer calls to Timer functions, which may result in a
		// significant performance improvement if many messages are being sent
		// and timeouts are infrequent.
		// The disadvantage of using a ticker instead of a timer is that
		// timeouts will be less accurate. That is, the effective timeout could
		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
		// between two messages being sent may not be recognized as a timeout.
		MaxProcessingTime time.Duration
		// Return specifies what channels will be populated. If they are set to true,
		// you must read from them to prevent deadlock.
		Return struct {
			// If enabled, any errors that occurred while consuming are returned on
			// the Errors channel (default disabled).
			Errors bool
		}

		// Offsets specifies configuration for how and when to commit consumed
		// offsets. This currently requires the manual use of an OffsetManager
		// but will eventually be automated.
		Offsets struct {
			// How frequently to commit updated offsets. Defaults to 1s.
			CommitInterval time.Duration
			// The initial offset to use if no offset was previously committed.
			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
			Initial int64
			// The retention duration for committed offsets. If zero, disabled
			// (in which case the `offsets.retention.minutes` option on the
			// broker will be used). Kafka only supports precision up to
			// milliseconds; nanoseconds will be truncated. Requires Kafka
			// broker version 0.9.0 or later.
			// (default is 0: disabled).
			Retention time.Duration

			Retry struct {
				// The total number of times to retry failing commit
				// requests during OffsetManager shutdown (default 3).
				Max int
			}
		}
	}
	// A user-provided string sent with every request to the brokers for logging,
	// debugging, and auditing purposes. Defaults to "sarama", but you should
	// probably set it to something specific to your application.
	ClientID string
	// The number of events to buffer in internal and external channels. This
	// permits the producer and consumer to continue processing some messages
	// in the background while user code is working, greatly improving throughput.
	// Defaults to 256.
	ChannelBufferSize int
	// The version of Kafka that Sarama will assume it is running against.
	// Defaults to the oldest supported stable version. Since Kafka provides
	// backwards-compatibility, setting it to a version older than you have
	// will not break anything, although it may prevent you from using the
	// latest features. Setting it to a version greater than you are actually
	// running may lead to random breakage.
	Version KafkaVersion
	// The registry to define metrics into.
	// Defaults to a local registry.
	// If you want to disable metrics gathering, set metrics.UseNilMetrics to true
	// prior to starting Sarama.
	// See Examples on how to use the metrics registry.
	MetricRegistry metrics.Registry
}
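// A minimal sketch of a static SASL/OAUTHBEARER token provider that could be
// assigned to Net.SASL.TokenProvider. It assumes AccessTokenProvider exposes a
// single Token() (*AccessToken, error) method, as its interface docs describe;
// a production provider would fetch and refresh tokens from an OAuth server
// rather than return a fixed value.
type staticTokenProvider struct {
	token string // hypothetical pre-issued bearer token
}

// Token returns the fixed token wrapped in an AccessToken.
func (s *staticTokenProvider) Token() (*AccessToken, error) {
	return &AccessToken{Token: s.token}, nil
}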
// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
	c := &Config{}

	c.Admin.Timeout = 3 * time.Second

	c.Net.MaxOpenRequests = 5
	c.Net.DialTimeout = 30 * time.Second
	c.Net.ReadTimeout = 30 * time.Second
	c.Net.WriteTimeout = 30 * time.Second
	c.Net.SASL.Handshake = true

	c.Metadata.Retry.Max = 3
	c.Metadata.Retry.Backoff = 250 * time.Millisecond
	c.Metadata.RefreshFrequency = 10 * time.Minute
	c.Metadata.Full = true

	c.Producer.MaxMessageBytes = 1000000
	c.Producer.RequiredAcks = WaitForLocal
	c.Producer.Timeout = 10 * time.Second
	c.Producer.Partitioner = NewHashPartitioner
	c.Producer.Retry.Max = 3
	c.Producer.Retry.Backoff = 100 * time.Millisecond
	c.Producer.Return.Errors = true
	c.Producer.CompressionLevel = CompressionLevelDefault

	c.Consumer.Fetch.Min = 1
	c.Consumer.Fetch.Default = 1024 * 1024
	c.Consumer.Retry.Backoff = 2 * time.Second
	c.Consumer.MaxWaitTime = 250 * time.Millisecond
	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
	c.Consumer.Return.Errors = false
	c.Consumer.Offsets.CommitInterval = 1 * time.Second
	c.Consumer.Offsets.Initial = OffsetNewest
	c.Consumer.Offsets.Retry.Max = 3

	c.Consumer.Group.Session.Timeout = 10 * time.Second
	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
	c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
	c.Consumer.Group.Rebalance.Retry.Max = 4
	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second

	c.ClientID = defaultClientID
	c.ChannelBufferSize = 256
	c.Version = MinVersion
	c.MetricRegistry = metrics.NewRegistry()

	return c
}
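// A minimal usage sketch: start from NewConfig, override a few fields, and
// call Validate before handing the config to a constructor. The client ID,
// the TLS settings, and the intended use with a SyncProducer are illustrative
// assumptions, not defaults of this file.
func exampleConfigUsage() (*Config, error) {
	cfg := NewConfig()
	cfg.ClientID = "my-app"                // avoid the default "sarama" (see the warning in Validate)
	cfg.Producer.RequiredAcks = WaitForAll // wait for all in-sync replicas to ack
	cfg.Producer.Return.Successes = true   // required when the config is used for a SyncProducer
	cfg.Net.TLS.Enable = true
	cfg.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}

	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}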
// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
	// some configuration values should be warned on but not fail completely, do those first
	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
	}
	if !c.Net.SASL.Enable {
		if c.Net.SASL.User != "" {
			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
		}
		if c.Net.SASL.Password != "" {
			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
		}
	}
	if c.Producer.RequiredAcks > 1 {
		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
	}
	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
	}
	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
	}
	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
	}
	if c.Producer.Timeout%time.Millisecond != 0 {
		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
	}
	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
	}
	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.ClientID == defaultClientID {
		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
	}
	// validate Net values
	switch {
	case c.Net.MaxOpenRequests <= 0:
		return ConfigurationError("Net.MaxOpenRequests must be > 0")
	case c.Net.DialTimeout <= 0:
		return ConfigurationError("Net.DialTimeout must be > 0")
	case c.Net.ReadTimeout <= 0:
		return ConfigurationError("Net.ReadTimeout must be > 0")
	case c.Net.WriteTimeout <= 0:
		return ConfigurationError("Net.WriteTimeout must be > 0")
	case c.Net.KeepAlive < 0:
		return ConfigurationError("Net.KeepAlive must be >= 0")
	case c.Net.SASL.Enable:
		// For backwards compatibility, an empty mechanism value defaults to PLAIN.
		isSASLPlain := len(c.Net.SASL.Mechanism) == 0 || c.Net.SASL.Mechanism == SASLTypePlaintext
		if isSASLPlain {
			if c.Net.SASL.User == "" {
				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
			}
			if c.Net.SASL.Password == "" {
				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
			}
		} else if c.Net.SASL.Mechanism == SASLTypeOAuth {
			if c.Net.SASL.TokenProvider == nil {
				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
			}
		} else {
			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s` and `%s`",
				SASLTypeOAuth, SASLTypePlaintext)
			return ConfigurationError(msg)
		}
	}
	// validate the Admin values
	switch {
	case c.Admin.Timeout <= 0:
		return ConfigurationError("Admin.Timeout must be > 0")
	}

	// validate the Metadata values
	switch {
	case c.Metadata.Retry.Max < 0:
		return ConfigurationError("Metadata.Retry.Max must be >= 0")
	case c.Metadata.Retry.Backoff < 0:
		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
	case c.Metadata.RefreshFrequency < 0:
		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
	}

	// validate the Producer values
	switch {
	case c.Producer.MaxMessageBytes <= 0:
		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
	case c.Producer.RequiredAcks < -1:
		return ConfigurationError("Producer.RequiredAcks must be >= -1")
	case c.Producer.Timeout <= 0:
		return ConfigurationError("Producer.Timeout must be > 0")
	case c.Producer.Partitioner == nil:
		return ConfigurationError("Producer.Partitioner must not be nil")
	case c.Producer.Flush.Bytes < 0:
		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
	case c.Producer.Flush.Messages < 0:
		return ConfigurationError("Producer.Flush.Messages must be >= 0")
	case c.Producer.Flush.Frequency < 0:
		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
	case c.Producer.Flush.MaxMessages < 0:
		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
	case c.Producer.Retry.Max < 0:
		return ConfigurationError("Producer.Retry.Max must be >= 0")
	case c.Producer.Retry.Backoff < 0:
		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
	}
	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
	}

	if c.Producer.Compression == CompressionGZIP {
		if c.Producer.CompressionLevel != CompressionLevelDefault {
			if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
			}
		}
	}

	if c.Producer.Idempotent {
		if !c.Version.IsAtLeast(V0_11_0_0) {
			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
		}
		if c.Producer.Retry.Max == 0 {
			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
		}
		if c.Producer.RequiredAcks != WaitForAll {
			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
		}
		if c.Net.MaxOpenRequests > 1 {
			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
		}
	}
	// validate the Consumer values
	switch {
	case c.Consumer.Fetch.Min <= 0:
		return ConfigurationError("Consumer.Fetch.Min must be > 0")
	case c.Consumer.Fetch.Default <= 0:
		return ConfigurationError("Consumer.Fetch.Default must be > 0")
	case c.Consumer.Fetch.Max < 0:
		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
	case c.Consumer.MaxProcessingTime <= 0:
		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
	case c.Consumer.Retry.Backoff < 0:
		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
	case c.Consumer.Offsets.CommitInterval <= 0:
		return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
	case c.Consumer.Offsets.Retry.Max < 0:
		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
	}
	// validate the Consumer Group values
	switch {
	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
	case c.Consumer.Group.Rebalance.Strategy == nil:
		return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be nil")
	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
	case c.Consumer.Group.Rebalance.Retry.Max < 0:
		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
	}
	// validate misc shared values
	switch {
	case c.ChannelBufferSize < 0:
		return ConfigurationError("ChannelBufferSize must be >= 0")
	case !validID.MatchString(c.ClientID):
		return ConfigurationError("ClientID is invalid")
	}

	return nil
}
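// A minimal sketch of a config satisfying the idempotent-producer constraints
// that Validate enforces above: Version >= V0_11_0_0, acks from all in-sync
// replicas, at least one retry, and a single in-flight request per connection.
func exampleIdempotentConfig() *Config {
	cfg := NewConfig()
	cfg.Version = V0_11_0_0
	cfg.Producer.Idempotent = true
	cfg.Producer.RequiredAcks = WaitForAll
	cfg.Producer.Retry.Max = 5 // any value >= 1 passes Validate
	cfg.Net.MaxOpenRequests = 1
	return cfg
}

// A minimal sketch of a dynamic backoff suitable for the two-argument
// BackoffFunc hooks (Metadata.Retry and Producer.Retry). The 100ms base, the
// doubling policy, and the 10s cap are illustrative choices, not library
// defaults; maxRetries is ignored in this sketch.
func exampleExponentialBackoff(retries, maxRetries int) time.Duration {
	backoff := 100 * time.Millisecond << uint(retries) // 100ms, 200ms, 400ms, ...
	if limit := 10 * time.Second; backoff > limit {
		backoff = limit // cap to avoid unbounded growth on repeated failures
	}
	return backoff
}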