produce_set.go

package sarama

import (
	"encoding/binary"
	"time"
)
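// partitionSet accumulates the messages buffered for a single
// topic/partition, together with their wire representation
// (recordsToSend) and the running byte size of that representation.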
type partitionSet struct {
	msgs          []*ProducerMessage
	recordsToSend Records
	bufferBytes   int
}
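// produceSet buffers messages grouped by topic and partition, and tracks
// the total byte and message counts so the producer can decide when to
// flush.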
type produceSet struct {
	parent *asyncProducer
	msgs   map[string]map[int32]*partitionSet

	bufferBytes int
	bufferCount int
}
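// newProduceSet returns an empty produceSet tied to the given
// asyncProducer, whose configuration drives versioning, compression,
// and flush thresholds.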
func newProduceSet(parent *asyncProducer) *produceSet {
	return &produceSet{
		msgs:   make(map[string]map[int32]*partitionSet),
		parent: parent,
	}
}
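// add encodes the message's key and value, stamps a timestamp if the
// message has none, and appends the result to the per-partition set.
// Depending on the configured Kafka version it builds either a v2
// RecordBatch (Kafka 0.11+) or a legacy MessageSet, and it updates the
// buffered-size accounting used by wouldOverflow and readyToFlush.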
func (ps *produceSet) add(msg *ProducerMessage) error {
	var err error
	var key, val []byte

	if msg.Key != nil {
		if key, err = msg.Key.Encode(); err != nil {
			return err
		}
	}

	if msg.Value != nil {
		if val, err = msg.Value.Encode(); err != nil {
			return err
		}
	}

	timestamp := msg.Timestamp
	if msg.Timestamp.IsZero() {
		timestamp = time.Now()
	}

	partitions := ps.msgs[msg.Topic]
	if partitions == nil {
		partitions = make(map[int32]*partitionSet)
		ps.msgs[msg.Topic] = partitions
	}

	var size int

	set := partitions[msg.Partition]
	if set == nil {
		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
			batch := &RecordBatch{
				FirstTimestamp:   timestamp,
				Version:          2,
				ProducerID:       -1, /* No producer id */
				Codec:            ps.parent.conf.Producer.Compression,
				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
			}
			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
			size = recordBatchOverhead
		} else {
			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
		}
		partitions[msg.Partition] = set
	}

	set.msgs = append(set.msgs, msg)
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		// We are being conservative here to avoid having to pre-encode the record.
		size += maximumRecordOverhead
		rec := &Record{
			Key:            key,
			Value:          val,
			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
		}
		size += len(key) + len(val)
		if len(msg.Headers) > 0 {
			rec.Headers = make([]*RecordHeader, len(msg.Headers))
			for i := range msg.Headers {
				rec.Headers[i] = &msg.Headers[i]
				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
			}
		}
		set.recordsToSend.RecordBatch.addRecord(rec)
	} else {
		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
			msgToSend.Timestamp = timestamp
			msgToSend.Version = 1
		}
		set.recordsToSend.MsgSet.addMessage(msgToSend)
		size = producerMessageOverhead + len(key) + len(val)
	}

	set.bufferBytes += size
	ps.bufferBytes += size
	ps.bufferCount++
	return nil
}
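// buildRequest converts the buffered sets into a single ProduceRequest,
// choosing the request version from the configured Kafka version. For
// v3 requests it fills in each batch's relative offsets; for older
// versions with compression enabled it wraps each partition's message
// set in a single compressed message, as described inline below.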
func (ps *produceSet) buildRequest() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
	}
	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
		req.Version = 2
	}
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		req.Version = 3
	}

	for topic, partitionSets := range ps.msgs {
		for partition, set := range partitionSets {
			if req.Version >= 3 {
				rb := set.recordsToSend.RecordBatch
				if len(rb.Records) > 0 {
					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
					for i, record := range rb.Records {
						record.OffsetDelta = int64(i)
					}
				}

				req.AddBatch(topic, partition, rb)
				continue
			}
			if ps.parent.conf.Producer.Compression == CompressionNone {
				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
			} else {
				// When compression is enabled, the entire set for each partition is compressed
				// and sent as the payload of a single fake "message" with the appropriate codec
				// set and no key. When the server sees a message with a compression codec, it
				// decompresses the payload and treats the result as its message set.

				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					// If our version is 0.10 or later, assign relative offsets
					// to the inner messages. This lets the broker avoid
					// recompressing the message set.
					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
					// for details on relative offsets.)
					for i, msg := range set.recordsToSend.MsgSet.Messages {
						msg.Offset = int64(i)
					}
				}
				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
				if err != nil {
					Logger.Println(err) // if this happens, it's basically our fault.
					panic(err)
				}
				compMsg := &Message{
					Codec:            ps.parent.conf.Producer.Compression,
					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
					Key:              nil,
					Value:            payload,
					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
				}
				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					compMsg.Version = 1
					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
				}
				req.AddMessage(topic, partition, compMsg)
			}
		}
	}

	return req
}
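// eachPartition invokes cb once for every buffered topic/partition pair.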
func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
	for topic, partitionSets := range ps.msgs {
		for partition, set := range partitionSets {
			cb(topic, partition, set.msgs)
		}
	}
}
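// dropPartition removes a topic/partition from the set, adjusts the
// byte and message counters, and returns the messages that were
// buffered for it so the caller can retry or fail them.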
func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
	if ps.msgs[topic] == nil {
		return nil
	}
	set := ps.msgs[topic][partition]
	if set == nil {
		return nil
	}
	ps.bufferBytes -= set.bufferBytes
	ps.bufferCount -= len(set.msgs)
	delete(ps.msgs[topic], partition)
	return set.msgs
}
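// wouldOverflow reports whether adding msg would exceed the maximum
// request size, the per-partition compressed-batch size limit, or the
// configured Flush.MaxMessages count.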
func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
	version := 1
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		version = 2
	}

	switch {
	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
		return true
	// Would we overflow the size-limit of a compressed message-batch for this partition?
	case ps.parent.conf.Producer.Compression != CompressionNone &&
		ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
		return true
	// Would we overflow simply in number of messages?
	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
		return true
	default:
		return false
	}
}
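// readyToFlush reports whether the buffered contents should be sent
// now, based on the Flush.Frequency/Bytes/Messages configuration.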
func (ps *produceSet) readyToFlush() bool {
	switch {
	// If we don't have any messages, nothing else matters
	case ps.empty():
		return false
	// If all three config values are 0, we always flush as-fast-as-possible
	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
		return true
	// If we've passed the message trigger-point
	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
		return true
	// If we've passed the byte trigger-point
	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
		return true
	default:
		return false
	}
}
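// empty reports whether the set currently holds no messages.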
func (ps *produceSet) empty() bool {
	return ps.bufferCount == 0
}
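
// Illustrative sketch (not part of this file, and sendRequest/handleError
// are hypothetical helpers): a caller would typically drive a produceSet
// roughly like this, flushing whenever adding the next message would
// overflow or a flush trigger fires:
//
//	set := newProduceSet(parent)
//	for msg := range input {
//		if set.wouldOverflow(msg) {
//			sendRequest(set.buildRequest()) // hypothetical send helper
//			set = newProduceSet(parent)
//		}
//		if err := set.add(msg); err != nil {
//			handleError(msg, err) // hypothetical error helper
//			continue
//		}
//		if set.readyToFlush() {
//			sendRequest(set.buildRequest())
//			set = newProduceSet(parent)
//		}
//	}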