/*
Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.

To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
SyncProducer can still sometimes be lost.
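
For example, a minimal sketch of the SyncProducer (the broker address and topic
are placeholders, and error handling is abbreviated):

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("hello")}
	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("message stored at partition %d, offset %d", partition, offset)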

To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
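
For example, a minimal sketch of consuming a single partition from the newest
offset (the broker address, topic, and partition are placeholders):

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatalln(err)
	}
	for msg := range pc.Messages() {
		log.Printf("offset %d: %s", msg.Offset, msg.Value)
	}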

For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
and message sent on the wire; the Client provides higher-level metadata management that is shared between
the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
exactly with the protocol fields documented by Kafka at
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
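
For example, a sketch of fetching topic metadata directly from a single broker
(the address and topic are placeholders; passing nil to Open uses the default
configuration):

	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(nil); err != nil {
		log.Fatalln(err)
	}
	defer broker.Close()

	response, err := broker.GetMetadata(&sarama.MetadataRequest{Topics: []string{"my_topic"}})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("cluster has %d broker(s)", len(response.Brokers))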

Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.

Broker-related metrics:

	+----------------------------------------------+------------+---------------------------------------------------------------+
	| Name                                         | Type       | Description                                                   |
	+----------------------------------------------+------------+---------------------------------------------------------------+
	| incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
	| incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
	| outgoing-byte-rate                           | meter      | Bytes/second written off all brokers                          |
	| outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written off a given broker                       |
	| request-rate                                 | meter      | Requests/second sent to all brokers                           |
	| request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
	| request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
	| request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
	| response-rate                                | meter      | Responses/second received from all brokers                    |
	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
	+----------------------------------------------+------------+---------------------------------------------------------------+

Note that we do not gather specific metrics for seed brokers, but they are part of the "all brokers" metrics.
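
One way to read these metrics is to dump the registry configured on the client
periodically, e.g. with go-metrics' Log helper (a minimal sketch, assuming the
go-metrics package is imported as metrics):

	config := sarama.NewConfig()
	go metrics.Log(config.MetricRegistry, 30*time.Second,
		log.New(os.Stderr, "metrics: ", log.LstdFlags))
	// ... then pass config to sarama.NewAsyncProducer, sarama.NewConsumer, etc.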
*/
package sarama

import (
	"io/ioutil"
	"log"
)

// Logger is the instance of a StdLogger interface that Sarama writes connection
// management events to. By default it is set to discard all log messages via ioutil.Discard,
// but you can set it to redirect wherever you want.
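//
// For example, to send Sarama's logs to standard error instead (a minimal sketch):
//
//	sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)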
var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)

// StdLogger is used to log error messages.
type StdLogger interface {
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// PanicHandler is called for recovering from panics spawned internally to the library (and thus
// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
var PanicHandler func(interface{})

// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
// to process.
var MaxRequestSize int32 = 100 * 1024 * 1024

// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
// protect the client from running out of memory. Please note that brokers do not have any natural limit on
// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
// (see https://issues.apache.org/jira/browse/KAFKA-2063).
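//
// For example, a consumer that expects very large fetch responses could raise
// the limit before creating any clients (a sketch; the value is arbitrary):
//
//	sarama.MaxResponseSize = 256 * 1024 * 1024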
var MaxResponseSize int32 = 100 * 1024 * 1024