  1. package sarama
  2. import "time"
// partitionSet accumulates everything buffered for a single
// topic/partition pair: the original producer messages in the order they
// were added, the wire-format MessageSet built from them, and a running
// byte total used for flush/overflow accounting.
type partitionSet struct {
	msgs        []*ProducerMessage // messages in insertion order (returned by dropPartition)
	setToSend   *MessageSet        // wire representation, grown by add()
	bufferBytes int                // sum of producerMessageOverhead + key + value sizes
}
// produceSet buffers messages for all topics/partitions until they are
// flushed as a single ProduceRequest. bufferBytes and bufferCount are
// aggregate totals across every contained partitionSet.
type produceSet struct {
	parent *asyncProducer
	msgs   map[string]map[int32]*partitionSet // topic -> partition -> buffered set

	bufferBytes int // total buffered payload bytes across all partitions
	bufferCount int // total number of buffered messages
}
  14. func newProduceSet(parent *asyncProducer) *produceSet {
  15. return &produceSet{
  16. msgs: make(map[string]map[int32]*partitionSet),
  17. parent: parent,
  18. }
  19. }
  20. func (ps *produceSet) add(msg *ProducerMessage) error {
  21. var err error
  22. var key, val []byte
  23. if msg.Key != nil {
  24. if key, err = msg.Key.Encode(); err != nil {
  25. return err
  26. }
  27. }
  28. if msg.Value != nil {
  29. if val, err = msg.Value.Encode(); err != nil {
  30. return err
  31. }
  32. }
  33. partitions := ps.msgs[msg.Topic]
  34. if partitions == nil {
  35. partitions = make(map[int32]*partitionSet)
  36. ps.msgs[msg.Topic] = partitions
  37. }
  38. set := partitions[msg.Partition]
  39. if set == nil {
  40. set = &partitionSet{setToSend: new(MessageSet)}
  41. partitions[msg.Partition] = set
  42. }
  43. set.msgs = append(set.msgs, msg)
  44. msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
  45. if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
  46. if msg.Timestamp.IsZero() {
  47. msgToSend.Timestamp = time.Now()
  48. } else {
  49. msgToSend.Timestamp = msg.Timestamp
  50. }
  51. msgToSend.Version = 1
  52. }
  53. set.setToSend.addMessage(msgToSend)
  54. size := producerMessageOverhead + len(key) + len(val)
  55. set.bufferBytes += size
  56. ps.bufferBytes += size
  57. ps.bufferCount++
  58. return nil
  59. }
// buildRequest assembles a ProduceRequest from everything currently
// buffered. With compression disabled each partition's MessageSet is
// attached directly; otherwise the whole set is encoded and wrapped in a
// single compressed wrapper message, as the Kafka wire protocol requires.
func (ps *produceSet) buildRequest() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
	}
	// Protocol version 2 corresponds to the 0.10 message format (v1 messages).
	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
		req.Version = 2
	}

	for topic, partitionSet := range ps.msgs {
		for partition, set := range partitionSet {
			if ps.parent.conf.Producer.Compression == CompressionNone {
				req.AddSet(topic, partition, set.setToSend)
			} else {
				// When compression is enabled, the entire set for each partition is compressed
				// and sent as the payload of a single fake "message" with the appropriate codec
				// set and no key. When the server sees a message with a compression codec, it
				// decompresses the payload and treats the result as its message set.
				payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
				if err != nil {
					Logger.Println(err) // if this happens, it's basically our fault.
					panic(err)
				}
				compMsg := &Message{
					Codec: ps.parent.conf.Producer.Compression,
					Key:   nil,
					Value: payload,
					Set:   set.setToSend, // Provide the underlying message set for accurate metrics
				}
				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					compMsg.Version = 1
					// Wrapper message carries the timestamp of the first inner message.
					compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
				}
				req.AddMessage(topic, partition, compMsg)
			}
		}
	}

	return req
}
  98. func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
  99. for topic, partitionSet := range ps.msgs {
  100. for partition, set := range partitionSet {
  101. cb(topic, partition, set.msgs)
  102. }
  103. }
  104. }
  105. func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
  106. if ps.msgs[topic] == nil {
  107. return nil
  108. }
  109. set := ps.msgs[topic][partition]
  110. if set == nil {
  111. return nil
  112. }
  113. ps.bufferBytes -= set.bufferBytes
  114. ps.bufferCount -= len(set.msgs)
  115. delete(ps.msgs[topic], partition)
  116. return set.msgs
  117. }
  118. func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
  119. switch {
  120. // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
  121. case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
  122. return true
  123. // Would we overflow the size-limit of a compressed message-batch for this partition?
  124. case ps.parent.conf.Producer.Compression != CompressionNone &&
  125. ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
  126. ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
  127. return true
  128. // Would we overflow simply in number of messages?
  129. case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
  130. return true
  131. default:
  132. return false
  133. }
  134. }
  135. func (ps *produceSet) readyToFlush() bool {
  136. switch {
  137. // If we don't have any messages, nothing else matters
  138. case ps.empty():
  139. return false
  140. // If all three config values are 0, we always flush as-fast-as-possible
  141. case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
  142. return true
  143. // If we've passed the message trigger-point
  144. case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
  145. return true
  146. // If we've passed the byte trigger-point
  147. case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
  148. return true
  149. default:
  150. return false
  151. }
  152. }
// empty reports whether no messages are currently buffered.
func (ps *produceSet) empty() bool {
	return ps.bufferCount == 0
}