admin.go 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787
  1. package sarama
  2. import (
  3. "errors"
  4. "fmt"
  5. "math/rand"
  6. "strconv"
  7. "sync"
  8. "time"
  9. )
// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
// Methods with stricter requirements will specify the minimum broker version required.
// You MUST call Close() on a client to avoid leaks
type ClusterAdmin interface {
	// CreateTopic creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
	// It may take several seconds after CreateTopic returns success for all the brokers
	// to become aware that the topic has been created. During this time, ListTopics
	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error

	// ListTopics lists the topics available in the cluster with the default options.
	ListTopics() (map[string]TopicDetail, error)

	// DescribeTopics describes some topics in the cluster.
	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)

	// DeleteTopic deletes a topic. It may take several seconds after DeleteTopic returns success
	// for all the brokers to become aware that the topic is gone.
	// During this time, ListTopics may continue to return information about the deleted topic.
	// If delete.topic.enable is false on the brokers, DeleteTopic will mark
	// the topic for deletion, but not actually delete it.
	// This operation is supported by brokers with version 0.10.1.0 or higher.
	DeleteTopic(topic string) error

	// CreatePartitions increases the number of partitions of the topics according to the corresponding values.
	// If partitions are increased for a topic that has a key, the partition logic or ordering of
	// the messages will be affected. It may take several seconds after this method returns
	// success for all the brokers to become aware that the partitions have been created.
	// During this time, DescribeTopics may not return information about the
	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error

	// DeleteRecords deletes records whose offset is smaller than the given offset of the corresponding partition.
	// This operation is supported by brokers with version 0.11.0.0 or higher.
	DeleteRecords(topic string, partitionOffsets map[int32]int64) error

	// DescribeConfig gets the configuration for the specified resources.
	// The returned configuration includes default values, and Default set to true
	// can be used to distinguish them from user supplied values.
	// Config entries where ReadOnly is true cannot be updated.
	// The value of config entries where Sensitive is true is always nil so
	// sensitive information is not disclosed.
	// This operation is supported by brokers with version 0.11.0.0 or higher.
	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)

	// AlterConfig updates the configuration for the specified resources with the default options.
	// This operation is supported by brokers with version 0.11.0.0 or higher.
	// The resources with their configs (topic is the only resource type with configs
	// that can be updated currently). Updates are not transactional so they may succeed
	// for some resources while failing for others. The configs for a particular resource are updated atomically.
	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error

	// CreateACL creates access control lists (ACLs) which are bound to specific resources.
	// This operation is not transactional so it may succeed for some ACLs while fail for others.
	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
	CreateACL(resource Resource, acl Acl) error

	// ListAcls lists access control lists (ACLs) according to the supplied filter.
	// It may take some time for changes made by CreateACL or DeleteACL to be reflected in the output of ListAcls.
	// This operation is supported by brokers with version 0.11.0.0 or higher.
	ListAcls(filter AclFilter) ([]ResourceAcls, error)

	// DeleteACL deletes access control lists (ACLs) according to the supplied filters.
	// This operation is not transactional so it may succeed for some ACLs while fail for others.
	// This operation is supported by brokers with version 0.11.0.0 or higher.
	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)

	// ListConsumerGroups lists the consumer groups available in the cluster.
	ListConsumerGroups() (map[string]string, error)

	// DescribeConsumerGroups describes the given consumer groups.
	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)

	// ListConsumerGroupOffsets lists the consumer group offsets available in the cluster.
	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)

	// DeleteConsumerGroup deletes a consumer group.
	DeleteConsumerGroup(group string) error

	// DescribeCluster gets information about the nodes in the cluster.
	DescribeCluster() (brokers []*Broker, controllerID int32, err error)

	// Close shuts down the admin and closes the underlying client.
	Close() error
}
// clusterAdmin is the default ClusterAdmin implementation. It routes admin
// requests through an underlying Client and its configuration.
type clusterAdmin struct {
	// client is the underlying Kafka client; it is closed by Close().
	client Client
	// conf caches client.Config() so methods can read admin settings directly.
	conf *Config
}
  85. // NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
  86. func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
  87. client, err := NewClient(addrs, conf)
  88. if err != nil {
  89. return nil, err
  90. }
  91. return NewClusterAdminFromClient(client)
  92. }
  93. // NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
  94. // Note that underlying client will also be closed on admin's Close() call.
  95. func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
  96. //make sure we can retrieve the controller
  97. _, err := client.Controller()
  98. if err != nil {
  99. return nil, err
  100. }
  101. ca := &clusterAdmin{
  102. client: client,
  103. conf: client.Config(),
  104. }
  105. return ca, nil
  106. }
// Close shuts down the admin and closes the underlying client.
func (ca *clusterAdmin) Close() error {
	return ca.client.Close()
}
// Controller returns the cluster controller broker, as cached by the client.
func (ca *clusterAdmin) Controller() (*Broker, error) {
	return ca.client.Controller()
}
// refreshController forces the client to re-discover the controller broker,
// used after a request is rejected with ErrNotController.
func (ca *clusterAdmin) refreshController() (*Broker, error) {
	return ca.client.RefreshController()
}
  116. // isErrNoController returns `true` if the given error type unwraps to an
  117. // `ErrNotController` response from Kafka
  118. func isErrNoController(err error) bool {
  119. switch e := err.(type) {
  120. case *TopicError:
  121. return e.Err == ErrNotController
  122. case *TopicPartitionError:
  123. return e.Err == ErrNotController
  124. case KError:
  125. return e == ErrNotController
  126. }
  127. return false
  128. }
  129. // retryOnError will repeatedly call the given (error-returning) func in the
  130. // case that its response is non-nil and retriable (as determined by the
  131. // provided retriable func) up to the maximum number of tries permitted by
  132. // the admin client configuration
  133. func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error {
  134. var err error
  135. for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
  136. err = fn()
  137. if err == nil || !retriable(err) {
  138. return err
  139. }
  140. Logger.Printf(
  141. "admin/request retrying after %dms... (%d attempts remaining)\n",
  142. ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
  143. time.Sleep(ca.conf.Admin.Retry.Backoff)
  144. continue
  145. }
  146. return err
  147. }
  148. func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
  149. if topic == "" {
  150. return ErrInvalidTopic
  151. }
  152. if detail == nil {
  153. return errors.New("you must specify topic details")
  154. }
  155. topicDetails := make(map[string]*TopicDetail)
  156. topicDetails[topic] = detail
  157. request := &CreateTopicsRequest{
  158. TopicDetails: topicDetails,
  159. ValidateOnly: validateOnly,
  160. Timeout: ca.conf.Admin.Timeout,
  161. }
  162. if ca.conf.Version.IsAtLeast(V0_11_0_0) {
  163. request.Version = 1
  164. }
  165. if ca.conf.Version.IsAtLeast(V1_0_0_0) {
  166. request.Version = 2
  167. }
  168. return ca.retryOnError(isErrNoController, func() error {
  169. b, err := ca.Controller()
  170. if err != nil {
  171. return err
  172. }
  173. rsp, err := b.CreateTopics(request)
  174. if err != nil {
  175. return err
  176. }
  177. topicErr, ok := rsp.TopicErrors[topic]
  178. if !ok {
  179. return ErrIncompleteResponse
  180. }
  181. if topicErr.Err != ErrNoError {
  182. if topicErr.Err == ErrNotController {
  183. _, _ = ca.refreshController()
  184. }
  185. return topicErr
  186. }
  187. return nil
  188. })
  189. }
  190. func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
  191. controller, err := ca.Controller()
  192. if err != nil {
  193. return nil, err
  194. }
  195. request := &MetadataRequest{
  196. Topics: topics,
  197. AllowAutoTopicCreation: false,
  198. }
  199. if ca.conf.Version.IsAtLeast(V1_0_0_0) {
  200. request.Version = 5
  201. } else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
  202. request.Version = 4
  203. }
  204. response, err := controller.GetMetadata(request)
  205. if err != nil {
  206. return nil, err
  207. }
  208. return response.Topics, nil
  209. }
  210. func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
  211. controller, err := ca.Controller()
  212. if err != nil {
  213. return nil, int32(0), err
  214. }
  215. request := &MetadataRequest{
  216. Topics: []string{},
  217. }
  218. if ca.conf.Version.IsAtLeast(V0_10_0_0) {
  219. request.Version = 1
  220. }
  221. response, err := controller.GetMetadata(request)
  222. if err != nil {
  223. return nil, int32(0), err
  224. }
  225. return response.Brokers, response.ControllerID, nil
  226. }
  227. func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
  228. brokers := ca.client.Brokers()
  229. for _, b := range brokers {
  230. if b.ID() == id {
  231. return b, nil
  232. }
  233. }
  234. return nil, fmt.Errorf("could not find broker id %d", id)
  235. }
  236. func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
  237. brokers := ca.client.Brokers()
  238. if len(brokers) > 0 {
  239. index := rand.Intn(len(brokers))
  240. return brokers[index], nil
  241. }
  242. return nil, errors.New("no available broker")
  243. }
// ListTopics returns a TopicDetail for every topic in the cluster, including
// each topic's partition/replica assignment and its non-default, non-sensitive
// configuration entries.
func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
	// In order to build TopicDetails we need to first get the list of all
	// topics using a MetadataRequest and then get their configs using a
	// DescribeConfigsRequest request. To avoid sending many requests to the
	// broker, we use a single DescribeConfigsRequest.

	// Send the all-topic MetadataRequest
	b, err := ca.findAnyBroker()
	if err != nil {
		return nil, err
	}
	// Best-effort open; the error is deliberately ignored (the broker may
	// already be connected, and the requests below surface real failures).
	_ = b.Open(ca.client.Config())

	metadataReq := &MetadataRequest{}
	metadataResp, err := b.GetMetadata(metadataReq)
	if err != nil {
		return nil, err
	}

	topicsDetailsMap := make(map[string]TopicDetail)

	var describeConfigsResources []*ConfigResource

	for _, topic := range metadataResp.Topics {
		topicDetails := TopicDetail{
			NumPartitions: int32(len(topic.Partitions)),
		}
		if len(topic.Partitions) > 0 {
			topicDetails.ReplicaAssignment = map[int32][]int32{}
			for _, partition := range topic.Partitions {
				topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
			}
			// NOTE(review): the replication factor is taken from partition 0
			// on the assumption all partitions share it — confirm for topics
			// with in-flight reassignments.
			topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
		}
		topicsDetailsMap[topic.Name] = topicDetails

		// we populate the resources we want to describe from the MetadataResponse
		topicResource := ConfigResource{
			Type: TopicResource,
			Name: topic.Name,
		}
		describeConfigsResources = append(describeConfigsResources, &topicResource)
	}

	// Send the DescribeConfigsRequest
	describeConfigsReq := &DescribeConfigsRequest{
		Resources: describeConfigsResources,
	}
	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
	if err != nil {
		return nil, err
	}

	// Merge the config entries into the details built from the metadata.
	for _, resource := range describeConfigsResp.Resources {
		topicDetails := topicsDetailsMap[resource.Name]
		topicDetails.ConfigEntries = make(map[string]*string)

		for _, entry := range resource.Configs {
			// only include non-default non-sensitive config
			// (don't actually think topic config will ever be sensitive)
			if entry.Default || entry.Sensitive {
				continue
			}
			topicDetails.ConfigEntries[entry.Name] = &entry.Value
		}

		// TopicDetail is a value type, so write the updated copy back.
		topicsDetailsMap[resource.Name] = topicDetails
	}

	return topicsDetailsMap, nil
}
  304. func (ca *clusterAdmin) DeleteTopic(topic string) error {
  305. if topic == "" {
  306. return ErrInvalidTopic
  307. }
  308. request := &DeleteTopicsRequest{
  309. Topics: []string{topic},
  310. Timeout: ca.conf.Admin.Timeout,
  311. }
  312. if ca.conf.Version.IsAtLeast(V0_11_0_0) {
  313. request.Version = 1
  314. }
  315. return ca.retryOnError(isErrNoController, func() error {
  316. b, err := ca.Controller()
  317. if err != nil {
  318. return err
  319. }
  320. rsp, err := b.DeleteTopics(request)
  321. if err != nil {
  322. return err
  323. }
  324. topicErr, ok := rsp.TopicErrorCodes[topic]
  325. if !ok {
  326. return ErrIncompleteResponse
  327. }
  328. if topicErr != ErrNoError {
  329. if topicErr == ErrNotController {
  330. _, _ = ca.refreshController()
  331. }
  332. return topicErr
  333. }
  334. return nil
  335. })
  336. }
  337. func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
  338. if topic == "" {
  339. return ErrInvalidTopic
  340. }
  341. topicPartitions := make(map[string]*TopicPartition)
  342. topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
  343. request := &CreatePartitionsRequest{
  344. TopicPartitions: topicPartitions,
  345. Timeout: ca.conf.Admin.Timeout,
  346. }
  347. return ca.retryOnError(isErrNoController, func() error {
  348. b, err := ca.Controller()
  349. if err != nil {
  350. return err
  351. }
  352. rsp, err := b.CreatePartitions(request)
  353. if err != nil {
  354. return err
  355. }
  356. topicErr, ok := rsp.TopicPartitionErrors[topic]
  357. if !ok {
  358. return ErrIncompleteResponse
  359. }
  360. if topicErr.Err != ErrNoError {
  361. if topicErr.Err == ErrNotController {
  362. _, _ = ca.refreshController()
  363. }
  364. return topicErr
  365. }
  366. return nil
  367. })
  368. }
  369. func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
  370. if topic == "" {
  371. return ErrInvalidTopic
  372. }
  373. partitionPerBroker := make(map[*Broker][]int32)
  374. for partition := range partitionOffsets {
  375. broker, err := ca.client.Leader(topic, partition)
  376. if err != nil {
  377. return err
  378. }
  379. if _, ok := partitionPerBroker[broker]; ok {
  380. partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
  381. } else {
  382. partitionPerBroker[broker] = []int32{partition}
  383. }
  384. }
  385. errs := make([]error, 0)
  386. for broker, partitions := range partitionPerBroker {
  387. topics := make(map[string]*DeleteRecordsRequestTopic)
  388. recordsToDelete := make(map[int32]int64)
  389. for _, p := range partitions {
  390. recordsToDelete[p] = partitionOffsets[p]
  391. }
  392. topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
  393. request := &DeleteRecordsRequest{
  394. Topics: topics,
  395. Timeout: ca.conf.Admin.Timeout,
  396. }
  397. rsp, err := broker.DeleteRecords(request)
  398. if err != nil {
  399. errs = append(errs, err)
  400. } else {
  401. deleteRecordsResponseTopic, ok := rsp.Topics[topic]
  402. if !ok {
  403. errs = append(errs, ErrIncompleteResponse)
  404. } else {
  405. for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
  406. if deleteRecordsResponsePartition.Err != ErrNoError {
  407. errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
  408. }
  409. }
  410. }
  411. }
  412. }
  413. if len(errs) > 0 {
  414. return ErrDeleteRecords{MultiError{&errs}}
  415. }
  416. //todo since we are dealing with couple of partitions it would be good if we return slice of errors
  417. //for each partition instead of one error
  418. return nil
  419. }
  420. // Returns a bool indicating whether the resource request needs to go to a
  421. // specific broker
  422. func dependsOnSpecificNode(resource ConfigResource) bool {
  423. return (resource.Type == BrokerResource && resource.Name != "") ||
  424. resource.Type == BrokerLoggerResource
  425. }
  426. func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
  427. var entries []ConfigEntry
  428. var resources []*ConfigResource
  429. resources = append(resources, &resource)
  430. request := &DescribeConfigsRequest{
  431. Resources: resources,
  432. }
  433. if ca.conf.Version.IsAtLeast(V1_1_0_0) {
  434. request.Version = 1
  435. }
  436. if ca.conf.Version.IsAtLeast(V2_0_0_0) {
  437. request.Version = 2
  438. }
  439. var (
  440. b *Broker
  441. err error
  442. )
  443. // DescribeConfig of broker/broker logger must be sent to the broker in question
  444. if dependsOnSpecificNode(resource) {
  445. id, _ := strconv.Atoi(resource.Name)
  446. b, err = ca.findBroker(int32(id))
  447. } else {
  448. b, err = ca.findAnyBroker()
  449. }
  450. if err != nil {
  451. return nil, err
  452. }
  453. _ = b.Open(ca.client.Config())
  454. rsp, err := b.DescribeConfigs(request)
  455. if err != nil {
  456. return nil, err
  457. }
  458. for _, rspResource := range rsp.Resources {
  459. if rspResource.Name == resource.Name {
  460. if rspResource.ErrorMsg != "" {
  461. return nil, errors.New(rspResource.ErrorMsg)
  462. }
  463. for _, cfgEntry := range rspResource.Configs {
  464. entries = append(entries, *cfgEntry)
  465. }
  466. }
  467. }
  468. return entries, nil
  469. }
  470. func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
  471. var resources []*AlterConfigsResource
  472. resources = append(resources, &AlterConfigsResource{
  473. Type: resourceType,
  474. Name: name,
  475. ConfigEntries: entries,
  476. })
  477. request := &AlterConfigsRequest{
  478. Resources: resources,
  479. ValidateOnly: validateOnly,
  480. }
  481. var (
  482. b *Broker
  483. err error
  484. )
  485. // AlterConfig of broker/broker logger must be sent to the broker in question
  486. if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
  487. id, _ := strconv.Atoi(name)
  488. b, err = ca.findBroker(int32(id))
  489. } else {
  490. b, err = ca.findAnyBroker()
  491. }
  492. if err != nil {
  493. return err
  494. }
  495. _ = b.Open(ca.client.Config())
  496. rsp, err := b.AlterConfigs(request)
  497. if err != nil {
  498. return err
  499. }
  500. for _, rspResource := range rsp.Resources {
  501. if rspResource.Name == name {
  502. if rspResource.ErrorMsg != "" {
  503. return errors.New(rspResource.ErrorMsg)
  504. }
  505. }
  506. }
  507. return nil
  508. }
  509. func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
  510. var acls []*AclCreation
  511. acls = append(acls, &AclCreation{resource, acl})
  512. request := &CreateAclsRequest{AclCreations: acls}
  513. if ca.conf.Version.IsAtLeast(V2_0_0_0) {
  514. request.Version = 1
  515. }
  516. b, err := ca.Controller()
  517. if err != nil {
  518. return err
  519. }
  520. _, err = b.CreateAcls(request)
  521. return err
  522. }
  523. func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
  524. request := &DescribeAclsRequest{AclFilter: filter}
  525. if ca.conf.Version.IsAtLeast(V2_0_0_0) {
  526. request.Version = 1
  527. }
  528. b, err := ca.Controller()
  529. if err != nil {
  530. return nil, err
  531. }
  532. rsp, err := b.DescribeAcls(request)
  533. if err != nil {
  534. return nil, err
  535. }
  536. var lAcls []ResourceAcls
  537. for _, rAcl := range rsp.ResourceAcls {
  538. lAcls = append(lAcls, *rAcl)
  539. }
  540. return lAcls, nil
  541. }
  542. func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
  543. var filters []*AclFilter
  544. filters = append(filters, &filter)
  545. request := &DeleteAclsRequest{Filters: filters}
  546. if ca.conf.Version.IsAtLeast(V2_0_0_0) {
  547. request.Version = 1
  548. }
  549. b, err := ca.Controller()
  550. if err != nil {
  551. return nil, err
  552. }
  553. rsp, err := b.DeleteAcls(request)
  554. if err != nil {
  555. return nil, err
  556. }
  557. var mAcls []MatchingAcl
  558. for _, fr := range rsp.FilterResponses {
  559. for _, mACL := range fr.MatchingAcls {
  560. mAcls = append(mAcls, *mACL)
  561. }
  562. }
  563. return mAcls, nil
  564. }
  565. func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
  566. groupsPerBroker := make(map[*Broker][]string)
  567. for _, group := range groups {
  568. controller, err := ca.client.Coordinator(group)
  569. if err != nil {
  570. return nil, err
  571. }
  572. groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
  573. }
  574. for broker, brokerGroups := range groupsPerBroker {
  575. response, err := broker.DescribeGroups(&DescribeGroupsRequest{
  576. Groups: brokerGroups,
  577. })
  578. if err != nil {
  579. return nil, err
  580. }
  581. result = append(result, response.Groups...)
  582. }
  583. return result, nil
  584. }
  585. func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
  586. allGroups = make(map[string]string)
  587. // Query brokers in parallel, since we have to query *all* brokers
  588. brokers := ca.client.Brokers()
  589. groupMaps := make(chan map[string]string, len(brokers))
  590. errors := make(chan error, len(brokers))
  591. wg := sync.WaitGroup{}
  592. for _, b := range brokers {
  593. wg.Add(1)
  594. go func(b *Broker, conf *Config) {
  595. defer wg.Done()
  596. _ = b.Open(conf) // Ensure that broker is opened
  597. response, err := b.ListGroups(&ListGroupsRequest{})
  598. if err != nil {
  599. errors <- err
  600. return
  601. }
  602. groups := make(map[string]string)
  603. for group, typ := range response.Groups {
  604. groups[group] = typ
  605. }
  606. groupMaps <- groups
  607. }(b, ca.conf)
  608. }
  609. wg.Wait()
  610. close(groupMaps)
  611. close(errors)
  612. for groupMap := range groupMaps {
  613. for group, protocolType := range groupMap {
  614. allGroups[group] = protocolType
  615. }
  616. }
  617. // Intentionally return only the first error for simplicity
  618. err = <-errors
  619. return
  620. }
  621. func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
  622. coordinator, err := ca.client.Coordinator(group)
  623. if err != nil {
  624. return nil, err
  625. }
  626. request := &OffsetFetchRequest{
  627. ConsumerGroup: group,
  628. partitions: topicPartitions,
  629. }
  630. if ca.conf.Version.IsAtLeast(V0_10_2_0) {
  631. request.Version = 2
  632. } else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
  633. request.Version = 1
  634. }
  635. return coordinator.FetchOffset(request)
  636. }
  637. func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
  638. coordinator, err := ca.client.Coordinator(group)
  639. if err != nil {
  640. return err
  641. }
  642. request := &DeleteGroupsRequest{
  643. Groups: []string{group},
  644. }
  645. resp, err := coordinator.DeleteGroups(request)
  646. if err != nil {
  647. return err
  648. }
  649. groupErr, ok := resp.GroupErrorCodes[group]
  650. if !ok {
  651. return ErrIncompleteResponse
  652. }
  653. if groupErr != ErrNoError {
  654. return groupErr
  655. }
  656. return nil
  657. }