produce_set.go

package sarama

import (
	"encoding/binary"
	"errors"
	"time"
)
// partitionSet holds the messages buffered for a single topic/partition,
// along with the wire-format records built from them and their byte count.
type partitionSet struct {
	msgs          []*ProducerMessage
	recordsToSend Records
	bufferBytes   int
}

// produceSet accumulates messages into per-topic, per-partition batches
// until the producer decides to flush them as a single ProduceRequest.
type produceSet struct {
	parent      *asyncProducer
	msgs        map[string]map[int32]*partitionSet
	bufferBytes int
	bufferCount int
}

func newProduceSet(parent *asyncProducer) *produceSet {
	return &produceSet{
		msgs:   make(map[string]map[int32]*partitionSet),
		parent: parent,
	}
}
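
// A minimal usage sketch (illustrative, not part of this file): the async
// producer builds a set, feeds it messages, and flushes once a trigger
// fires. The variable names here are hypothetical.
//
//	ps := newProduceSet(parent)
//	if !ps.wouldOverflow(msg) {
//		_ = ps.add(msg)
//	}
//	if ps.readyToFlush() {
//		req := ps.buildRequest()
//		// hand req to the broker connection
//		_ = req
//	}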

func (ps *produceSet) add(msg *ProducerMessage) error {
	var err error
	var key, val []byte

	// Encode the key and value up front so that encoding errors surface
	// before the message is committed to a batch.
	if msg.Key != nil {
		if key, err = msg.Key.Encode(); err != nil {
			return err
		}
	}

	if msg.Value != nil {
		if val, err = msg.Value.Encode(); err != nil {
			return err
		}
	}

	timestamp := msg.Timestamp
	if timestamp.IsZero() {
		timestamp = time.Now()
	}
	// Kafka stores timestamps with millisecond precision.
	timestamp = timestamp.Truncate(time.Millisecond)

	partitions := ps.msgs[msg.Topic]
	if partitions == nil {
		partitions = make(map[int32]*partitionSet)
		ps.msgs[msg.Topic] = partitions
	}

	var size int

	set := partitions[msg.Partition]
	if set == nil {
		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
			batch := &RecordBatch{
				FirstTimestamp:   timestamp,
				Version:          2,
				Codec:            ps.parent.conf.Producer.Compression,
				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
				ProducerID:       ps.parent.txnmgr.producerID,
				ProducerEpoch:    ps.parent.txnmgr.producerEpoch,
			}
			if ps.parent.conf.Producer.Idempotent {
				batch.FirstSequence = msg.sequenceNumber
			}
			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
			size = recordBatchOverhead
		} else {
			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
		}
		partitions[msg.Partition] = set
	}

	set.msgs = append(set.msgs, msg)
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
			return errors.New("assertion failed: message out of sequence added to a batch")
		}
		// We are being conservative here to avoid having to pre-encode the record
		size += maximumRecordOverhead
		rec := &Record{
			Key:            key,
			Value:          val,
			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
		}
		size += len(key) + len(val)
		if len(msg.Headers) > 0 {
			rec.Headers = make([]*RecordHeader, len(msg.Headers))
			for i := range msg.Headers {
				rec.Headers[i] = &msg.Headers[i]
				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
			}
		}
		set.recordsToSend.RecordBatch.addRecord(rec)
	} else {
		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
			msgToSend.Timestamp = timestamp
			msgToSend.Version = 1
		}
		set.recordsToSend.MsgSet.addMessage(msgToSend)
		size = producerMessageOverhead + len(key) + len(val)
	}

	set.bufferBytes += size
	ps.bufferBytes += size
	ps.bufferCount++
	return nil
}
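
// For reference, a sketch of the kind of message add expects (StringEncoder
// implements the Encoder interface; the topic and values here are made up):
//
//	msg := &ProducerMessage{
//		Topic:     "events",
//		Partition: 0,
//		Key:       StringEncoder("user-42"),
//		Value:     StringEncoder(`{"clicked":true}`),
//		Timestamp: time.Now(), // optional; add fills in time.Now() if zero
//	}
//	if err := ps.add(msg); err != nil {
//		// the Key/Value Encoder failed; the message was not buffered
//	}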

func (ps *produceSet) buildRequest() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
	}
	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
		req.Version = 2
	}
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		req.Version = 3
	}

	for topic, partitionSets := range ps.msgs {
		for partition, set := range partitionSets {
			if req.Version >= 3 {
				// If the API version we're hitting is 3 or greater, we need to calculate
				// offsets for each record in the batch relative to FirstOffset.
				// Additionally, we must set LastOffsetDelta to the value of the last offset
				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
				// final record of any batch will have an offset of (# of records in batch) - 1.
				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
				// under the RecordBatch section for details.)
				rb := set.recordsToSend.RecordBatch
				if len(rb.Records) > 0 {
					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
					for i, record := range rb.Records {
						record.OffsetDelta = int64(i)
					}
				}
				req.AddBatch(topic, partition, rb)
				continue
			}
			if ps.parent.conf.Producer.Compression == CompressionNone {
				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
			} else {
				// When compression is enabled, the entire set for each partition is compressed
				// and sent as the payload of a single fake "message" with the appropriate codec
				// set and no key. When the server sees a message with a compression codec, it
				// decompresses the payload and treats the result as its message set.
				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					// If our version is 0.10 or later, assign relative offsets
					// to the inner messages. This lets the broker avoid
					// recompressing the message set.
					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
					// for details on relative offsets.)
					for i, msg := range set.recordsToSend.MsgSet.Messages {
						msg.Offset = int64(i)
					}
				}
				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
				if err != nil {
					Logger.Println(err) // if this happens, it's basically our fault.
					panic(err)
				}
				compMsg := &Message{
					Codec:            ps.parent.conf.Producer.Compression,
					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
					Key:              nil,
					Value:            payload,
					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
				}
				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					compMsg.Version = 1
					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
				}
				req.AddMessage(topic, partition, compMsg)
			}
		}
	}

	return req
}
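
// The version checks above map broker capabilities to request versions.
// A summary of what buildRequest actually selects (not an exhaustive
// protocol table):
//
//	< 0.10.0.0   -> ProduceRequest v0, legacy MessageSet
//	>= 0.10.0.0  -> v2, MessageSet with timestamps and relative offsets
//	>= 0.11.0.0  -> v3, RecordBatch (record headers, idempotence)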

func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
	for topic, partitionSets := range ps.msgs {
		for partition, set := range partitionSets {
			cb(topic, partition, set)
		}
	}
}
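
// Example: callers can use eachPartition to inspect or fan out the buffered
// batches (illustrative only; Logger is the package-level StdLogger):
//
//	ps.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
//		Logger.Printf("%s/%d: %d msgs, %d bytes\n",
//			topic, partition, len(pSet.msgs), pSet.bufferBytes)
//	})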

func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
	if ps.msgs[topic] == nil {
		return nil
	}
	set := ps.msgs[topic][partition]
	if set == nil {
		return nil
	}
	ps.bufferBytes -= set.bufferBytes
	ps.bufferCount -= len(set.msgs)
	delete(ps.msgs[topic], partition)
	return set.msgs
}

func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
	version := 1
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		version = 2
	}

	switch {
	// Would we overflow our maximum possible size-on-the-wire? 10 KiB is arbitrary overhead for safety.
	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
		return true
	// Would we overflow the size-limit of a message-batch for this partition?
	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
		return true
	// Would we overflow simply in number of messages?
	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
		return true
	default:
		return false
	}
}
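
// Worked example for the first case: assuming the package default
// MaxRequestSize of 100 MiB (100*1024*1024 bytes), the set reports overflow
// once bufferBytes + msg.byteSize(version) reaches
// 104857600 - 10240 = 104847360 bytes, i.e. 10 KiB short of the wire limit.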

func (ps *produceSet) readyToFlush() bool {
	switch {
	// If we don't have any messages, nothing else matters
	case ps.empty():
		return false
	// If all three config values are 0, we always flush as-fast-as-possible
	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
		return true
	// If we've passed the message trigger-point
	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
		return true
	// If we've passed the byte trigger-point
	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
		return true
	default:
		return false
	}
}
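
// For example, a config like the following (illustrative values, not
// defaults) makes readyToFlush fire on whichever threshold is hit first:
//
//	conf := NewConfig()
//	conf.Producer.Flush.Messages = 64   // flush after 64 buffered messages
//	conf.Producer.Flush.Bytes = 1 << 20 // ...or after 1 MiB of buffered data
//
// Note that Flush.Frequency is enforced by the producer's timer, not by
// this check; it only appears here in the all-zero fast path above.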

func (ps *produceSet) empty() bool {
	return ps.bufferCount == 0
}