fetch_response.go

package sarama

import (
	"sort"
	"time"
)

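// AbortedTransaction identifies a transaction that was aborted, as reported in
// fetch responses from version 4 onwards: the ID of the producer that owned the
// transaction and the first offset of its messages in the partition.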
type AbortedTransaction struct {
	ProducerID  int64
	FirstOffset int64
}

func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
	if t.ProducerID, err = pd.getInt64(); err != nil {
		return err
	}

	if t.FirstOffset, err = pd.getInt64(); err != nil {
		return err
	}

	return nil
}

func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
	pe.putInt64(t.ProducerID)
	pe.putInt64(t.FirstOffset)

	return nil
}

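// FetchResponseBlock holds the per-partition payload of a fetch response: the
// error code, the high water mark and related offsets, any aborted transactions
// (v4+), and the record sets (or legacy message sets) that were returned.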
type FetchResponseBlock struct {
	Err                 KError
	HighWaterMarkOffset int64
	LastStableOffset    int64
	LogStartOffset      int64
	AbortedTransactions []*AbortedTransaction
	Records             *Records // Deprecated: use FetchResponseBlock.RecordsSet
	RecordsSet          []*Records
	Partial             bool
}

func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	b.Err = KError(tmp)

	b.HighWaterMarkOffset, err = pd.getInt64()
	if err != nil {
		return err
	}

	if version >= 4 {
		b.LastStableOffset, err = pd.getInt64()
		if err != nil {
			return err
		}

		if version >= 5 {
			b.LogStartOffset, err = pd.getInt64()
			if err != nil {
				return err
			}
		}

		numTransact, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		if numTransact >= 0 {
			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
		}

		for i := 0; i < numTransact; i++ {
			transact := new(AbortedTransaction)
			if err = transact.decode(pd); err != nil {
				return err
			}
			b.AbortedTransactions[i] = transact
		}
	}

	recordsSize, err := pd.getInt32()
	if err != nil {
		return err
	}

	recordsDecoder, err := pd.getSubset(int(recordsSize))
	if err != nil {
		return err
	}

	b.RecordsSet = []*Records{}
	for recordsDecoder.remaining() > 0 {
		records := &Records{}
		if err := records.decode(recordsDecoder); err != nil {
			// Running out of data mid-set is only an error if we have not decoded
			// any record set yet; otherwise the broker simply truncated the response.
			if err == ErrInsufficientData {
				if len(b.RecordsSet) == 0 {
					b.Partial = true
				}
				break
			}
			return err
		}

		partial, err := records.isPartial()
		if err != nil {
			return err
		}

		n, err := records.numRecords()
		if err != nil {
			return err
		}

		if n > 0 || (partial && len(b.RecordsSet) == 0) {
			b.RecordsSet = append(b.RecordsSet, records)

			if b.Records == nil {
				b.Records = records
			}
		}

		overflow, err := records.isOverflow()
		if err != nil {
			return err
		}

		if partial || overflow {
			break
		}
	}

	return nil
}

func (b *FetchResponseBlock) numRecords() (int, error) {
	sum := 0

	for _, records := range b.RecordsSet {
		count, err := records.numRecords()
		if err != nil {
			return 0, err
		}
		sum += count
	}

	return sum, nil
}

func (b *FetchResponseBlock) isPartial() (bool, error) {
	if b.Partial {
		return true, nil
	}

	if len(b.RecordsSet) == 1 {
		return b.RecordsSet[0].isPartial()
	}

	return false, nil
}

func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
	pe.putInt16(int16(b.Err))

	pe.putInt64(b.HighWaterMarkOffset)

	if version >= 4 {
		pe.putInt64(b.LastStableOffset)

		if version >= 5 {
			pe.putInt64(b.LogStartOffset)
		}

		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
			return err
		}
		for _, transact := range b.AbortedTransactions {
			if err = transact.encode(pe); err != nil {
				return err
			}
		}
	}

	pe.push(&lengthField{})
	for _, records := range b.RecordsSet {
		err = records.encode(pe)
		if err != nil {
			return err
		}
	}
	return pe.pop()
}

func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
	// There is no documentation guaranteeing that AbortedTransactions is ordered,
	// and the Java client feeds it into a PriorityQueue keyed on FirstOffset,
	// so sort it ourselves to be safe.
	at := b.AbortedTransactions
	sort.Slice(
		at,
		func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
	)
	return at
}

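// FetchResponse models a Kafka fetch response (API key 1). Blocks is keyed by
// topic and then partition. ThrottleTime is only present for version 1 and newer,
// and ErrorCode/SessionID only for version 7 and newer. LogAppendTime and
// Timestamp are not wire fields; they control the timestamps used by the Add*
// helper methods below.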
type FetchResponse struct {
	Blocks        map[string]map[int32]*FetchResponseBlock
	ThrottleTime  time.Duration
	ErrorCode     int16
	SessionID     int32
	Version       int16
	LogAppendTime bool
	Timestamp     time.Time
}

func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
	r.Version = version

	if r.Version >= 1 {
		throttle, err := pd.getInt32()
		if err != nil {
			return err
		}
		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
	}

	if r.Version >= 7 {
		r.ErrorCode, err = pd.getInt16()
		if err != nil {
			return err
		}
		r.SessionID, err = pd.getInt32()
		if err != nil {
			return err
		}
	}

	numTopics, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
	for i := 0; i < numTopics; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}

		numBlocks, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)

		for j := 0; j < numBlocks; j++ {
			id, err := pd.getInt32()
			if err != nil {
				return err
			}

			block := new(FetchResponseBlock)
			err = block.decode(pd, version)
			if err != nil {
				return err
			}
			r.Blocks[name][id] = block
		}
	}

	return nil
}

func (r *FetchResponse) encode(pe packetEncoder) (err error) {
	if r.Version >= 1 {
		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
	}

	if r.Version >= 7 {
		pe.putInt16(r.ErrorCode)
		pe.putInt32(r.SessionID)
	}

	err = pe.putArrayLength(len(r.Blocks))
	if err != nil {
		return err
	}

	for topic, partitions := range r.Blocks {
		err = pe.putString(topic)
		if err != nil {
			return err
		}

		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}

		for id, block := range partitions {
			pe.putInt32(id)
			err = block.encode(pe, r.Version)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func (r *FetchResponse) key() int16 {
	return 1
}

func (r *FetchResponse) version() int16 {
	return r.Version
}

func (r *FetchResponse) requiredVersion() KafkaVersion {
	switch r.Version {
	case 0:
		return MinVersion
	case 1:
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	case 3:
		return V0_10_1_0
	case 4, 5:
		return V0_11_0_0
	case 6:
		return V1_0_0_0
	case 7:
		return V1_1_0_0
	case 8:
		return V2_0_0_0
	case 9, 10:
		return V2_1_0_0
	case 11:
		return V2_3_0_0
	default:
		return MaxVersion
	}
}

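// GetBlock returns the response block for the given topic and partition, or nil
// if no such block is present.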
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
	if r.Blocks == nil {
		return nil
	}

	if r.Blocks[topic] == nil {
		return nil
	}

	return r.Blocks[topic][partition]
}

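// AddError sets the error code on the block for the given topic and partition,
// creating the block (and any missing maps) if necessary.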
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
	}
	partitions, ok := r.Blocks[topic]
	if !ok {
		partitions = make(map[int32]*FetchResponseBlock)
		r.Blocks[topic] = partitions
	}
	frb, ok := partitions[partition]
	if !ok {
		frb = new(FetchResponseBlock)
		partitions[partition] = frb
	}
	frb.Err = err
}

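// getOrCreateBlock returns the block for the given topic and partition, lazily
// creating it and any missing maps along the way.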
func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
	}
	partitions, ok := r.Blocks[topic]
	if !ok {
		partitions = make(map[int32]*FetchResponseBlock)
		r.Blocks[topic] = partitions
	}
	frb, ok := partitions[partition]
	if !ok {
		frb = new(FetchResponseBlock)
		partitions[partition] = frb
	}
	return frb
}

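// encodeKV encodes an optional key and value to raw bytes; nil encoders yield
// nil slices and encoding errors are ignored.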
func encodeKV(key, value Encoder) ([]byte, []byte) {
	var kb []byte
	var vb []byte
	if key != nil {
		kb, _ = key.Encode()
	}
	if value != nil {
		vb, _ = value.Encode()
	}
	return kb, vb
}

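// AddMessageWithTimestamp appends a legacy message (message-set format) with the
// given key, value, offset and timestamp to the block for the given topic and
// partition, creating the block and an empty message set if needed. When
// r.LogAppendTime is set, the message timestamp is overridden with r.Timestamp.
// These Add* helpers are handy when constructing fetch responses by hand, for
// example in tests.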
func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
	frb := r.getOrCreateBlock(topic, partition)
	kb, vb := encodeKV(key, value)
	if r.LogAppendTime {
		timestamp = r.Timestamp
	}
	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
	if len(frb.RecordsSet) == 0 {
		records := newLegacyRecords(&MessageSet{})
		frb.RecordsSet = []*Records{&records}
	}
	set := frb.RecordsSet[0].MsgSet
	set.Messages = append(set.Messages, msgBlock)
}

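// AddRecordWithTimestamp appends a single record to the partition's first record
// batch (v2 record format), creating the batch with the given timestamp as its
// FirstTimestamp if no record set exists yet.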
func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
	frb := r.getOrCreateBlock(topic, partition)
	kb, vb := encodeKV(key, value)
	if len(frb.RecordsSet) == 0 {
		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
		frb.RecordsSet = []*Records{&records}
	}
	batch := frb.RecordsSet[0].RecordBatch
	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
	batch.addRecord(rec)
}

// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but instead
// of appending one record to an existing batch, it appends a new batch containing
// a single record to the fetch response. Since transactions are handled at the
// batch level (the whole batch is either committed or aborted), use this to test
// transactions.
func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
	frb := r.getOrCreateBlock(topic, partition)
	kb, vb := encodeKV(key, value)

	batch := &RecordBatch{
		Version:         2,
		LogAppendTime:   r.LogAppendTime,
		FirstTimestamp:  timestamp,
		MaxTimestamp:    r.Timestamp,
		FirstOffset:     offset,
		LastOffsetDelta: 0,
		ProducerID:      producerID,
		IsTransactional: isTransactional,
	}
	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
	batch.addRecord(rec)

	records := newDefaultRecords(batch)
	frb.RecordsSet = append(frb.RecordsSet, &records)
}

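// AddControlRecordWithTimestamp appends a new transactional control batch to the
// block for the given topic and partition, containing a single control record
// (e.g. a commit or abort marker) of the given type for the given producer.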
func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
	frb := r.getOrCreateBlock(topic, partition)

	// batch
	batch := &RecordBatch{
		Version:         2,
		LogAppendTime:   r.LogAppendTime,
		FirstTimestamp:  timestamp,
		MaxTimestamp:    r.Timestamp,
		FirstOffset:     offset,
		LastOffsetDelta: 0,
		ProducerID:      producerID,
		IsTransactional: true,
		Control:         true,
	}

	// records
	records := newDefaultRecords(nil)
	records.RecordBatch = batch

	// record
	crAbort := ControlRecord{
		Version: 0,
		Type:    recordType,
	}
	crKey := &realEncoder{raw: make([]byte, 4)}
	crValue := &realEncoder{raw: make([]byte, 6)}
	crAbort.encode(crKey, crValue)
	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
	batch.addRecord(rec)

	frb.RecordsSet = append(frb.RecordsSet, &records)
}

func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
}

func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
}

func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
}

func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
}

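// SetLastOffsetDelta sets LastOffsetDelta on the partition's first record batch,
// creating an empty v2 batch if the block has no record sets yet.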
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
	frb := r.getOrCreateBlock(topic, partition)
	if len(frb.RecordsSet) == 0 {
		records := newDefaultRecords(&RecordBatch{Version: 2})
		frb.RecordsSet = []*Records{&records}
	}
	batch := frb.RecordsSet[0].RecordBatch
	batch.LastOffsetDelta = offset
}

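// SetLastStableOffset sets the last stable offset on the block for the given
// topic and partition, creating the block if necessary.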
func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
	frb := r.getOrCreateBlock(topic, partition)
	frb.LastStableOffset = offset
}