@@ -115,19 +115,44 @@ func (client *Client) Close() error {
 	return nil
 }
 
-// Partitions returns the sorted list of available partition IDs for the given topic.
+// Partitions returns the sorted list of all partition IDs for the given topic.
 func (client *Client) Partitions(topic string) ([]int32, error) {
 	// Check to see whether the client is closed
 	if client.Closed() {
 		return nil, ClosedClient
 	}
 
-	partitions := client.cachedPartitions(topic)
+	partitions := client.cachedPartitions(topic, allPartitions)
+
+	if len(partitions) == 0 {
+		err := client.RefreshTopicMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, allPartitions)
+	}
+
+	if partitions == nil {
+		return nil, UnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+// WritablePartitions returns the sorted list of all writable partition IDs for the given topic,
+// where "writable" means "having a valid leader accepting writes".
+func (client *Client) WritablePartitions(topic string) ([]int32, error) {
+	// Check to see whether the client is closed
+	if client.Closed() {
+		return nil, ClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, writablePartitions)
 
 	// len==0 catches when it's nil (no such topic) and the odd case when every single
 	// partition is undergoing leader election simultaneously. Callers have to be able to handle
 	// this function returning an empty slice (which is a valid return value) but catching it
-	// here the first time (note we *don't* catch it below where we return NoSuchTopic) triggers
+	// here the first time (note we *don't* catch it below where we return UnknownTopicOrPartition) triggers
 	// a metadata refresh as a nicety so callers can just try again and don't have to manually
 	// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
 	if len(partitions) == 0 {
@@ -135,11 +160,11 @@ func (client *Client) Partitions(topic string) ([]int32, error) {
 		if err != nil {
 			return nil, err
 		}
-		partitions = client.cachedPartitions(topic)
+		partitions = client.cachedPartitions(topic, writablePartitions)
 	}
 
 	if partitions == nil {
-		return nil, NoSuchTopic
+		return nil, UnknownTopicOrPartition
 	}
 
 	return partitions, nil
@@ -182,43 +207,53 @@ func (client *Client) getMetadata(topic string, partitionID int32) (*PartitionMe
 }
 
 func (client *Client) Replicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ClosedClient
+	}
+
 	metadata, err := client.getMetadata(topic, partitionID)
 
 	if err != nil {
 		return nil, err
 	}
 
+	if metadata.Err == ReplicaNotAvailable {
+		return nil, metadata.Err
+	}
 	return dupeAndSort(metadata.Replicas), nil
 }
 
 func (client *Client) ReplicasInSync(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ClosedClient
+	}
+
 	metadata, err := client.getMetadata(topic, partitionID)
 
 	if err != nil {
 		return nil, err
 	}
 
+	if metadata.Err == ReplicaNotAvailable {
+		return nil, metadata.Err
+	}
	return dupeAndSort(metadata.Isr), nil
 }
 
 // Leader returns the broker object that is the leader of the current topic/partition, as
 // determined by querying the cluster metadata.
 func (client *Client) Leader(topic string, partitionID int32) (*Broker, error) {
-	leader := client.cachedLeader(topic, partitionID)
+	leader, err := client.cachedLeader(topic, partitionID)
 
 	if leader == nil {
-		err := client.RefreshTopicMetadata(topic)
+		err = client.RefreshTopicMetadata(topic)
 		if err != nil {
 			return nil, err
 		}
-		leader = client.cachedLeader(topic, partitionID)
+		leader, err = client.cachedLeader(topic, partitionID)
 	}
 
-	if leader == nil {
-		return nil, UnknownTopicOrPartition
-	}
-
-	return leader, nil
+	return leader, err
 }
 
 // RefreshTopicMetadata takes a list of topics and queries the cluster to refresh the
@@ -310,7 +345,7 @@ func (client *Client) refreshMetadata(topics []string, retries int) error {
 	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
 	for _, topic := range topics {
 		if len(topic) == 0 {
-			return NoSuchTopic
+			return UnknownTopicOrPartition
 		}
 	}
 
@@ -386,7 +421,7 @@ func (client *Client) any() *Broker {
 	return client.seedBroker
 }
 
-func (client *Client) cachedLeader(topic string, partitionID int32) *Broker {
+func (client *Client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
 	client.lock.RLock()
 	defer client.lock.RUnlock()
 
@@ -394,11 +429,14 @@ func (client *Client) cachedLeader(topic string, partitionID int32) *Broker {
 	if partitions != nil {
 		metadata, ok := partitions[partitionID]
 		if ok {
-			return client.brokers[metadata.Leader]
+			if metadata.Err == LeaderNotAvailable {
+				return nil, metadata.Err
+			}
+			return client.brokers[metadata.Leader], nil
 		}
 	}
 
-	return nil
+	return nil, UnknownTopicOrPartition
 }
 
 func (client *Client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
@@ -413,7 +451,12 @@ func (client *Client) cachedMetadata(topic string, partitionID int32) *Partition
 	return nil
 }
 
-func (client *Client) cachedPartitions(topic string) []int32 {
+const (
+	allPartitions = iota
+	writablePartitions
+)
+
+func (client *Client) cachedPartitions(topic string, partitionSet int) []int32 {
 	client.lock.RLock()
 	defer client.lock.RUnlock()
 
@@ -423,8 +466,11 @@ func (client *Client) cachedPartitions(topic string) []int32 {
 	}
 
 	ret := make([]int32, 0, len(partitions))
-	for id := range partitions {
-		ret = append(ret, id)
+	for _, partition := range partitions {
+		if partitionSet == writablePartitions && partition.Err == LeaderNotAvailable {
+			continue
+		}
+		ret = append(ret, partition.ID)
 	}
 
 	sort.Sort(int32Slice(ret))
@@ -479,32 +525,24 @@ func (client *Client) update(data *MetadataResponse) ([]string, error) {
 
 	var err error
 	for _, topic := range data.Topics {
-		switch topic.Err {
-		case NoError:
-			break
-		case LeaderNotAvailable:
-			toRetry[topic.Name] = true
-		default:
+		if topic.Err != NoError {
 			err = topic.Err
+			continue
 		}
+
 		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
 		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
 			switch partition.Err {
 			case NoError:
 				broker := client.brokers[partition.Leader]
 				if _, present := client.deadBrokerAddrs[broker.Addr()]; present {
 					if connected, _ := broker.Connected(); !connected {
 						toRetry[topic.Name] = true
-						delete(client.metadata[topic.Name], partition.ID)
-						continue
 					}
 				}
-				client.metadata[topic.Name][partition.ID] = partition
 			case LeaderNotAvailable:
 				toRetry[topic.Name] = true
-				delete(client.metadata[topic.Name], partition.ID)
-			default:
-				err = partition.Err
 			}
 		}
 	}
@@ -532,12 +570,12 @@ func NewClientConfig() *ClientConfig {
 // Validate checks a ClientConfig instance. This will return a
 // ConfigurationError if the specified values don't make sense.
 func (config *ClientConfig) Validate() error {
-	if config.MetadataRetries <= 0 {
-		return ConfigurationError("Invalid MetadataRetries. Try 10")
+	if config.MetadataRetries < 0 {
+		return ConfigurationError("Invalid MetadataRetries, must be >= 0")
 	}
 
 	if config.WaitForElection <= time.Duration(0) {
-		return ConfigurationError("Invalid WaitForElection. Try 250*time.Millisecond")
+		return ConfigurationError("Invalid WaitForElection, must be > 0")
 	}
 
 	if config.DefaultBrokerConf != nil {
@@ -547,7 +585,7 @@ func (config *ClientConfig) Validate() error {
 	}
 
 	if config.BackgroundRefreshFrequency < time.Duration(0) {
-		return ConfigurationError("Invalid BackgroundRefreshFrequency.")
+		return ConfigurationError("Invalid BackgroundRefreshFrequency, must be >= 0")
 	}
 
 	return nil
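
A minimal caller-side sketch of the split API, not part of the patch: Partitions
lists every partition ID, WritablePartitions only those with a live leader, and
an empty result from either has already triggered a metadata refresh, so a simple
retry is usually enough. NewClient's exact signature, the broker address, and the
topic name are assumptions for illustration.

	package main

	import (
		"fmt"
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		// Assumed constructor signature for sarama of this era.
		client, err := sarama.NewClient("example", []string{"localhost:9092"}, sarama.NewClientConfig())
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		// Every partition ID for the topic, leaderless ones included.
		all, err := client.Partitions("my_topic")
		if err != nil {
			log.Fatal(err)
		}

		// Only partitions whose leader is currently accepting writes. An empty
		// slice is a valid result while every partition is mid-election.
		writable, err := client.WritablePartitions("my_topic")
		if err != nil {
			log.Fatal(err)
		}

		fmt.Printf("all=%v writable=%v\n", all, writable)
	}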
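The reworked cachedLeader also gives Leader a more precise failure mode. A
hypothetical helper (an assumption, not code from this patch) might treat
LeaderNotAvailable as "election in progress, wait WaitForElection and retry
once" while passing UnknownTopicOrPartition straight through:

	package example

	import (
		"time"

		"github.com/Shopify/sarama"
	)

	// leaderWithRetry is a hypothetical wrapper around Client.Leader; the
	// single-retry policy is an illustrative assumption.
	func leaderWithRetry(client *sarama.Client, topic string, partition int32, config *sarama.ClientConfig) (*sarama.Broker, error) {
		broker, err := client.Leader(topic, partition)
		if err == sarama.LeaderNotAvailable {
			// A leader election is likely underway; wait the configured interval.
			time.Sleep(config.WaitForElection)
			broker, err = client.Leader(topic, partition)
		}
		return broker, err
	}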