// offset_manager_test.go — test suite for the offset manager.
  1. package sarama
  2. import (
  3. "sync/atomic"
  4. "testing"
  5. "time"
  6. )
  7. func initOffsetManagerWithBackoffFunc(t *testing.T, retention time.Duration,
  8. backoffFunc func(retries, maxRetries int) time.Duration, config *Config) (om OffsetManager,
  9. testClient Client, broker, coordinator *MockBroker) {
  10. config.Metadata.Retry.Max = 1
  11. if backoffFunc != nil {
  12. config.Metadata.Retry.BackoffFunc = backoffFunc
  13. }
  14. config.Consumer.Offsets.AutoCommit.Interval = 1 * time.Millisecond
  15. config.Version = V0_9_0_0
  16. if retention > 0 {
  17. config.Consumer.Offsets.Retention = retention
  18. }
  19. broker = NewMockBroker(t, 1)
  20. coordinator = NewMockBroker(t, 2)
  21. seedMeta := new(MetadataResponse)
  22. seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID())
  23. seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, []int32{}, ErrNoError)
  24. seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, []int32{}, ErrNoError)
  25. broker.Returns(seedMeta)
  26. var err error
  27. testClient, err = NewClient([]string{broker.Addr()}, config)
  28. if err != nil {
  29. t.Fatal(err)
  30. }
  31. broker.Returns(&ConsumerMetadataResponse{
  32. CoordinatorID: coordinator.BrokerID(),
  33. CoordinatorHost: "127.0.0.1",
  34. CoordinatorPort: coordinator.Port(),
  35. })
  36. om, err = NewOffsetManagerFromClient("group", testClient)
  37. if err != nil {
  38. t.Fatal(err)
  39. }
  40. return om, testClient, broker, coordinator
  41. }
  42. func initOffsetManager(t *testing.T, retention time.Duration) (om OffsetManager,
  43. testClient Client, broker, coordinator *MockBroker) {
  44. return initOffsetManagerWithBackoffFunc(t, retention, nil, NewConfig())
  45. }
  46. func initPartitionOffsetManager(t *testing.T, om OffsetManager,
  47. coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager {
  48. fetchResponse := new(OffsetFetchResponse)
  49. fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{
  50. Err: ErrNoError,
  51. Offset: initialOffset,
  52. Metadata: metadata,
  53. })
  54. coordinator.Returns(fetchResponse)
  55. pom, err := om.ManagePartition("my_topic", 0)
  56. if err != nil {
  57. t.Fatal(err)
  58. }
  59. return pom
  60. }
  61. func TestNewOffsetManager(t *testing.T) {
  62. seedBroker := NewMockBroker(t, 1)
  63. seedBroker.Returns(new(MetadataResponse))
  64. defer seedBroker.Close()
  65. testClient, err := NewClient([]string{seedBroker.Addr()}, nil)
  66. if err != nil {
  67. t.Fatal(err)
  68. }
  69. om, err := NewOffsetManagerFromClient("group", testClient)
  70. if err != nil {
  71. t.Error(err)
  72. }
  73. safeClose(t, om)
  74. safeClose(t, testClient)
  75. _, err = NewOffsetManagerFromClient("group", testClient)
  76. if err != ErrClosedClient {
  77. t.Errorf("Error expected for closed client; actual value: %v", err)
  78. }
  79. }
  80. var offsetsautocommitTestTable = []struct {
  81. name string
  82. set bool // if given will override default configuration for Consumer.Offsets.AutoCommit.Enable
  83. enable bool
  84. }{
  85. {
  86. "AutoCommit (default)",
  87. false, // use default
  88. true,
  89. },
  90. {
  91. "AutoCommit Enabled",
  92. true,
  93. true,
  94. },
  95. {
  96. "AutoCommit Disabled",
  97. true,
  98. false,
  99. },
  100. }
  101. func TestNewOffsetManagerOffsetsAutoCommit(t *testing.T) {
  102. // Tests to validate configuration of `Consumer.Offsets.AutoCommit.Enable`
  103. for _, tt := range offsetsautocommitTestTable {
  104. t.Run(tt.name, func(t *testing.T) {
  105. config := NewConfig()
  106. if tt.set {
  107. config.Consumer.Offsets.AutoCommit.Enable = tt.enable
  108. }
  109. om, testClient, broker, coordinator := initOffsetManagerWithBackoffFunc(t, 0, nil, config)
  110. pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
  111. // Wait long enough for the test not to fail..
  112. timeout := 50 * config.Consumer.Offsets.AutoCommit.Interval
  113. called := make(chan none)
  114. ocResponse := new(OffsetCommitResponse)
  115. ocResponse.AddError("my_topic", 0, ErrNoError)
  116. handler := func(req *request) (res encoder) {
  117. close(called)
  118. return ocResponse
  119. }
  120. coordinator.setHandler(handler)
  121. // Should force an offset commit, if auto-commit is enabled.
  122. expected := int64(1)
  123. pom.ResetOffset(expected, "modified_meta")
  124. _, _ = pom.NextOffset()
  125. select {
  126. case <-called:
  127. // OffsetManager called on the wire.
  128. if !config.Consumer.Offsets.AutoCommit.Enable {
  129. t.Errorf("Received request for: %s when AutoCommit is disabled", tt.name)
  130. }
  131. case <-time.After(timeout):
  132. // Timeout waiting for OffsetManager to call on the wire.
  133. if config.Consumer.Offsets.AutoCommit.Enable {
  134. t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
  135. }
  136. }
  137. broker.Close()
  138. coordinator.Close()
  139. // !! om must be closed before the pom so pom.release() is called before pom.Close()
  140. safeClose(t, om)
  141. safeClose(t, pom)
  142. safeClose(t, testClient)
  143. })
  144. }
  145. }
  146. // Test recovery from ErrNotCoordinatorForConsumer
  147. // on first fetchInitialOffset call
  148. func TestOffsetManagerFetchInitialFail(t *testing.T) {
  149. om, testClient, broker, coordinator := initOffsetManager(t, 0)
  150. // Error on first fetchInitialOffset call
  151. responseBlock := OffsetFetchResponseBlock{
  152. Err: ErrNotCoordinatorForConsumer,
  153. Offset: 5,
  154. Metadata: "test_meta",
  155. }
  156. fetchResponse := new(OffsetFetchResponse)
  157. fetchResponse.AddBlock("my_topic", 0, &responseBlock)
  158. coordinator.Returns(fetchResponse)
  159. // Refresh coordinator
  160. newCoordinator := NewMockBroker(t, 3)
  161. broker.Returns(&ConsumerMetadataResponse{
  162. CoordinatorID: newCoordinator.BrokerID(),
  163. CoordinatorHost: "127.0.0.1",
  164. CoordinatorPort: newCoordinator.Port(),
  165. })
  166. // Second fetchInitialOffset call is fine
  167. fetchResponse2 := new(OffsetFetchResponse)
  168. responseBlock2 := responseBlock
  169. responseBlock2.Err = ErrNoError
  170. fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
  171. newCoordinator.Returns(fetchResponse2)
  172. pom, err := om.ManagePartition("my_topic", 0)
  173. if err != nil {
  174. t.Error(err)
  175. }
  176. broker.Close()
  177. coordinator.Close()
  178. newCoordinator.Close()
  179. safeClose(t, pom)
  180. safeClose(t, om)
  181. safeClose(t, testClient)
  182. }
  183. // Test fetchInitialOffset retry on ErrOffsetsLoadInProgress
  184. func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) {
  185. retryCount := int32(0)
  186. backoff := func(retries, maxRetries int) time.Duration {
  187. atomic.AddInt32(&retryCount, 1)
  188. return 0
  189. }
  190. om, testClient, broker, coordinator := initOffsetManagerWithBackoffFunc(t, 0, backoff, NewConfig())
  191. // Error on first fetchInitialOffset call
  192. responseBlock := OffsetFetchResponseBlock{
  193. Err: ErrOffsetsLoadInProgress,
  194. Offset: 5,
  195. Metadata: "test_meta",
  196. }
  197. fetchResponse := new(OffsetFetchResponse)
  198. fetchResponse.AddBlock("my_topic", 0, &responseBlock)
  199. coordinator.Returns(fetchResponse)
  200. // Second fetchInitialOffset call is fine
  201. fetchResponse2 := new(OffsetFetchResponse)
  202. responseBlock2 := responseBlock
  203. responseBlock2.Err = ErrNoError
  204. fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
  205. coordinator.Returns(fetchResponse2)
  206. pom, err := om.ManagePartition("my_topic", 0)
  207. if err != nil {
  208. t.Error(err)
  209. }
  210. broker.Close()
  211. coordinator.Close()
  212. safeClose(t, pom)
  213. safeClose(t, om)
  214. safeClose(t, testClient)
  215. if atomic.LoadInt32(&retryCount) == 0 {
  216. t.Fatal("Expected at least one retry")
  217. }
  218. }
  219. func TestPartitionOffsetManagerInitialOffset(t *testing.T) {
  220. om, testClient, broker, coordinator := initOffsetManager(t, 0)
  221. testClient.Config().Consumer.Offsets.Initial = OffsetOldest
  222. // Kafka returns -1 if no offset has been stored for this partition yet.
  223. pom := initPartitionOffsetManager(t, om, coordinator, -1, "")
  224. offset, meta := pom.NextOffset()
  225. if offset != OffsetOldest {
  226. t.Errorf("Expected offset 5. Actual: %v", offset)
  227. }
  228. if meta != "" {
  229. t.Errorf("Expected metadata to be empty. Actual: %q", meta)
  230. }
  231. safeClose(t, pom)
  232. safeClose(t, om)
  233. broker.Close()
  234. coordinator.Close()
  235. safeClose(t, testClient)
  236. }
  237. func TestPartitionOffsetManagerNextOffset(t *testing.T) {
  238. om, testClient, broker, coordinator := initOffsetManager(t, 0)
  239. pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta")
  240. offset, meta := pom.NextOffset()
  241. if offset != 5 {
  242. t.Errorf("Expected offset 5. Actual: %v", offset)
  243. }
  244. if meta != "test_meta" {
  245. t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta)
  246. }
  247. safeClose(t, pom)
  248. safeClose(t, om)
  249. broker.Close()
  250. coordinator.Close()
  251. safeClose(t, testClient)
  252. }
  253. func TestPartitionOffsetManagerResetOffset(t *testing.T) {
  254. om, testClient, broker, coordinator := initOffsetManager(t, 0)
  255. pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
  256. ocResponse := new(OffsetCommitResponse)
  257. ocResponse.AddError("my_topic", 0, ErrNoError)
  258. coordinator.Returns(ocResponse)
  259. expected := int64(1)
  260. pom.ResetOffset(expected, "modified_meta")
  261. actual, meta := pom.NextOffset()
  262. if actual != expected {
  263. t.Errorf("Expected offset %v. Actual: %v", expected, actual)
  264. }
  265. if meta != "modified_meta" {
  266. t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
  267. }
  268. safeClose(t, pom)
  269. safeClose(t, om)
  270. safeClose(t, testClient)
  271. broker.Close()
  272. coordinator.Close()
  273. }
  274. func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
  275. om, testClient, broker, coordinator := initOffsetManager(t, time.Hour)
  276. pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
  277. ocResponse := new(OffsetCommitResponse)
  278. ocResponse.AddError("my_topic", 0, ErrNoError)
  279. handler := func(req *request) (res encoder) {
  280. if req.body.version() != 2 {
  281. t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
  282. }
  283. offsetCommitRequest := req.body.(*OffsetCommitRequest)
  284. if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
  285. t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
  286. }
  287. return ocResponse
  288. }
  289. coordinator.setHandler(handler)
  290. expected := int64(1)
  291. pom.ResetOffset(expected, "modified_meta")
  292. actual, meta := pom.NextOffset()
  293. if actual != expected {
  294. t.Errorf("Expected offset %v. Actual: %v", expected, actual)
  295. }
  296. if meta != "modified_meta" {
  297. t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
  298. }
  299. safeClose(t, pom)
  300. safeClose(t, om)
  301. safeClose(t, testClient)
  302. broker.Close()
  303. coordinator.Close()
  304. }
  305. func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
  306. om, testClient, broker, coordinator := initOffsetManager(t, 0)
  307. pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
  308. ocResponse := new(OffsetCommitResponse)
  309. ocResponse.AddError("my_topic", 0, ErrNoError)
  310. coordinator.Returns(ocResponse)
  311. pom.MarkOffset(100, "modified_meta")
  312. offset, meta := pom.NextOffset()
  313. if offset != 100 {
  314. t.Errorf("Expected offset 100. Actual: %v", offset)
  315. }
  316. if meta != "modified_meta" {
  317. t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
  318. }
  319. safeClose(t, pom)
  320. safeClose(t, om)
  321. safeClose(t, testClient)
  322. broker.Close()
  323. coordinator.Close()
  324. }
  325. func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) {
  326. om, testClient, broker, coordinator := initOffsetManager(t, time.Hour)
  327. pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
  328. ocResponse := new(OffsetCommitResponse)
  329. ocResponse.AddError("my_topic", 0, ErrNoError)
  330. handler := func(req *request) (res encoder) {
  331. if req.body.version() != 2 {
  332. t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
  333. }
  334. offsetCommitRequest := req.body.(*OffsetCommitRequest)
  335. if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
  336. t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
  337. }
  338. return ocResponse
  339. }
  340. coordinator.setHandler(handler)
  341. pom.MarkOffset(100, "modified_meta")
  342. offset, meta := pom.NextOffset()
  343. if offset != 100 {
  344. t.Errorf("Expected offset 100. Actual: %v", offset)
  345. }
  346. if meta != "modified_meta" {
  347. t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
  348. }
  349. safeClose(t, pom)
  350. safeClose(t, om)
  351. safeClose(t, testClient)
  352. broker.Close()
  353. coordinator.Close()
  354. }
  355. func TestPartitionOffsetManagerCommitErr(t *testing.T) {
  356. om, testClient, broker, coordinator := initOffsetManager(t, 0)
  357. pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
  358. // Error on one partition
  359. ocResponse := new(OffsetCommitResponse)
  360. ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
  361. ocResponse.AddError("my_topic", 1, ErrNoError)
  362. coordinator.Returns(ocResponse)
  363. newCoordinator := NewMockBroker(t, 3)
  364. // For RefreshCoordinator()
  365. broker.Returns(&ConsumerMetadataResponse{
  366. CoordinatorID: newCoordinator.BrokerID(),
  367. CoordinatorHost: "127.0.0.1",
  368. CoordinatorPort: newCoordinator.Port(),
  369. })
  370. // Nothing in response.Errors at all
  371. ocResponse2 := new(OffsetCommitResponse)
  372. newCoordinator.Returns(ocResponse2)
  373. // No error, no need to refresh coordinator
  374. // Error on the wrong partition for this pom
  375. ocResponse3 := new(OffsetCommitResponse)
  376. ocResponse3.AddError("my_topic", 1, ErrNoError)
  377. newCoordinator.Returns(ocResponse3)
  378. // No error, no need to refresh coordinator
  379. // ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block
  380. ocResponse4 := new(OffsetCommitResponse)
  381. ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition)
  382. newCoordinator.Returns(ocResponse4)
  383. // For RefreshCoordinator()
  384. broker.Returns(&ConsumerMetadataResponse{
  385. CoordinatorID: newCoordinator.BrokerID(),
  386. CoordinatorHost: "127.0.0.1",
  387. CoordinatorPort: newCoordinator.Port(),
  388. })
  389. // Normal error response
  390. ocResponse5 := new(OffsetCommitResponse)
  391. ocResponse5.AddError("my_topic", 0, ErrNoError)
  392. newCoordinator.Returns(ocResponse5)
  393. pom.MarkOffset(100, "modified_meta")
  394. err := pom.Close()
  395. if err != nil {
  396. t.Error(err)
  397. }
  398. broker.Close()
  399. coordinator.Close()
  400. newCoordinator.Close()
  401. safeClose(t, om)
  402. safeClose(t, testClient)
  403. }
  404. // Test of recovery from abort
  405. func TestAbortPartitionOffsetManager(t *testing.T) {
  406. om, testClient, broker, coordinator := initOffsetManager(t, 0)
  407. pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
  408. // this triggers an error in the CommitOffset request,
  409. // which leads to the abort call
  410. coordinator.Close()
  411. // Response to refresh coordinator request
  412. newCoordinator := NewMockBroker(t, 3)
  413. broker.Returns(&ConsumerMetadataResponse{
  414. CoordinatorID: newCoordinator.BrokerID(),
  415. CoordinatorHost: "127.0.0.1",
  416. CoordinatorPort: newCoordinator.Port(),
  417. })
  418. ocResponse := new(OffsetCommitResponse)
  419. ocResponse.AddError("my_topic", 0, ErrNoError)
  420. newCoordinator.Returns(ocResponse)
  421. pom.MarkOffset(100, "modified_meta")
  422. safeClose(t, pom)
  423. safeClose(t, om)
  424. broker.Close()
  425. safeClose(t, testClient)
  426. }