
Finish refactor to remove namespacing

Evan Huus · 12 years ago · commit 799b00cdad
12 changed files with 57 additions and 89 deletions
  1. client.go (+17 -21)
  2. client_test.go (+7 -8)
  3. consumer.go (+5 -11)
  4. consumer_test.go (+2 -3)
  5. errors.go (+15 -0)
  6. kafka/errors.go (+0 -18)
  7. message.go (+4 -3)
  8. partitioner.go (+0 -0)
  9. partitioner_test.go (+0 -0)
  10. producer.go (+5 -11)
  11. producer_test.go (+2 -3)
  12. utils.go (+0 -11)
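
The refactor collapses the sarama/protocol, sarama/encoding, sarama/types and sarama/mock sub-packages into the single kafka package, as the per-file diffs below show. A minimal caller-side sketch of what that means; the import path is an assumption for illustration, the identifiers come from the diffs:

package main

import kafka "sarama" // illustrative import path; the package itself is named kafka, as in the diff

func main() {
	// Broker was previously reached via `import k "sarama/protocol"` as k.Broker;
	// it is now exported straight from the one package.
	broker := kafka.NewBroker("localhost", 9092)
	if err := broker.Connect(); err != nil {
		panic(err)
	}

	// Error codes such as LEADER_NOT_AVAILABLE used to live in sarama/types.
	_ = kafka.LEADER_NOT_AVAILABLE
}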

+ 17 - 21
kafka/client.go → client.go

@@ -5,11 +5,7 @@ It is built on sister package sarama/protocol.
 */
 package kafka
 
-import k "sarama/protocol"
-
 import (
-	"sarama/encoding"
-	"sarama/types"
 	"sort"
 	"sync"
 	"time"
@@ -21,7 +17,7 @@ import (
 // multiple concurrent Producers and Consumers.
 type Client struct {
 	id      string                     // client id for broker requests
-	brokers map[int32]*k.Broker        // maps broker ids to brokers
+	brokers map[int32]*Broker          // maps broker ids to brokers
 	leaders map[string]map[int32]int32 // maps topics to partition ids to broker ids
 	lock    sync.RWMutex               // protects access to the maps, only one since they're always written together
 }
@@ -30,7 +26,7 @@ type Client struct {
 // host:port address, and uses that broker to automatically fetch metadata on the rest of the kafka cluster.
 // If metadata cannot be retrieved (even if the connection otherwise succeeds) then the client is not created.
 func NewClient(id string, host string, port int32) (client *Client, err error) {
-	tmp := k.NewBroker(host, port)
+	tmp := NewBroker(host, port)
 	err = tmp.Connect()
 	if err != nil {
 		return nil, err
@@ -39,7 +35,7 @@ func NewClient(id string, host string, port int32) (client *Client, err error) {
 	client = new(Client)
 	client.id = id
 
-	client.brokers = make(map[int32]*k.Broker)
+	client.brokers = make(map[int32]*Broker)
 	client.leaders = make(map[string]map[int32]int32)
 
 	// add it to the set so that refreshTopics can find it
@@ -81,7 +77,7 @@ func (client *Client) Close() {
 // functions for use by producers and consumers
 // if Go had the concept they would be marked 'protected'
 
-func (client *Client) leader(topic string, partition_id int32) (*k.Broker, error) {
+func (client *Client) leader(topic string, partition_id int32) (*Broker, error) {
 	leader := client.cachedLeader(topic, partition_id)
 
 	if leader == nil {
@@ -93,7 +89,7 @@ func (client *Client) leader(topic string, partition_id int32) (*k.Broker, error
 	}
 
 	if leader == nil {
-		return nil, types.UNKNOWN_TOPIC_OR_PARTITION
+		return nil, UNKNOWN_TOPIC_OR_PARTITION
 	}
 
 	return leader, nil
@@ -117,7 +113,7 @@ func (client *Client) partitions(topic string) ([]int32, error) {
 	return partitions, nil
 }
 
-func (client *Client) disconnectBroker(broker *k.Broker) {
+func (client *Client) disconnectBroker(broker *Broker) {
 	client.lock.Lock()
 	defer client.lock.Unlock()
 
@@ -138,7 +134,7 @@ func (client *Client) refreshTopic(topic string) error {
 
 func (client *Client) refreshTopics(topics []string, retries int) error {
 	for broker := client.any(); broker != nil; broker = client.any() {
-		response, err := broker.GetMetadata(client.id, &k.MetadataRequest{Topics: topics})
+		response, err := broker.GetMetadata(client.id, &MetadataRequest{Topics: topics})
 
 		switch err {
 		case nil:
@@ -151,12 +147,12 @@ func (client *Client) refreshTopics(topics []string, retries int) error {
 				return nil
 			default:
 				if retries <= 0 {
-					return types.LEADER_NOT_AVAILABLE
+					return LEADER_NOT_AVAILABLE
 				}
 				time.Sleep(250 * time.Millisecond) // wait for leader election
 				return client.refreshTopics(retry, retries-1)
 			}
-		case encoding.EncodingError:
+		case EncodingError:
 			// didn't even send, return the error
 			return err
 		}
@@ -168,7 +164,7 @@ func (client *Client) refreshTopics(topics []string, retries int) error {
 	return OutOfBrokers
 }
 
-func (client *Client) any() *k.Broker {
+func (client *Client) any() *Broker {
 	client.lock.RLock()
 	defer client.lock.RUnlock()
 
@@ -179,7 +175,7 @@ func (client *Client) any() *k.Broker {
 	return nil
 }
 
-func (client *Client) cachedLeader(topic string, partition_id int32) *k.Broker {
+func (client *Client) cachedLeader(topic string, partition_id int32) *Broker {
 	client.lock.RLock()
 	defer client.lock.RUnlock()
 
@@ -213,11 +209,11 @@ func (client *Client) cachedPartitions(topic string) []int32 {
 }
 
 // if no fatal error, returns a list of topics that need retrying due to LEADER_NOT_AVAILABLE
-func (client *Client) update(data *k.MetadataResponse) ([]string, error) {
+func (client *Client) update(data *MetadataResponse) ([]string, error) {
 	// First discard brokers that we already know about. This avoids bouncing TCP connections,
 	// and especially avoids closing valid connections out from under other code which may be trying
 	// to use them. We only need a read-lock for this.
-	var newBrokers []*k.Broker
+	var newBrokers []*Broker
 	client.lock.RLock()
 	for _, broker := range data.Brokers {
 		if !broker.Equals(client.brokers[broker.ID()]) {
@@ -249,9 +245,9 @@ func (client *Client) update(data *k.MetadataResponse) ([]string, error) {
 
 	for _, topic := range data.Topics {
 		switch topic.Err {
-		case types.NO_ERROR:
+		case NO_ERROR:
 			break
-		case types.LEADER_NOT_AVAILABLE:
+		case LEADER_NOT_AVAILABLE:
 			toRetry[topic.Name] = true
 		default:
 			return nil, topic.Err
@@ -259,13 +255,13 @@ func (client *Client) update(data *k.MetadataResponse) ([]string, error) {
 		client.leaders[topic.Name] = make(map[int32]int32, len(topic.Partitions))
 		for _, partition := range topic.Partitions {
 			switch partition.Err {
-			case types.LEADER_NOT_AVAILABLE:
+			case LEADER_NOT_AVAILABLE:
 				// in the LEADER_NOT_AVAILABLE case partition.Leader will be -1 because the
 				// partition is in the middle of leader election, so we fallthrough to save it
 				// anyways in order to avoid returning the stale leader (since -1 isn't a valid broker ID)
 				toRetry[topic.Name] = true
 				fallthrough
-			case types.NO_ERROR:
+			case NO_ERROR:
 				client.leaders[topic.Name][partition.Id] = partition.Leader
 			default:
 				return nil, partition.Err

+ 7 - 8
kafka/client_test.go → client_test.go

@@ -2,13 +2,12 @@ package kafka
 
 import (
 	"encoding/binary"
-	"sarama/mock"
 	"testing"
 )
 
 func TestSimpleClient(t *testing.T) {
 	responses := make(chan []byte, 1)
-	mockBroker := mock.NewBroker(t, responses)
+	mockBroker := NewMockBroker(t, responses)
 	defer mockBroker.Close()
 
 	// Only one response needed, an empty metadata response
@@ -23,8 +22,8 @@ func TestSimpleClient(t *testing.T) {
 
 func TestClientExtraBrokers(t *testing.T) {
 	responses := make(chan []byte, 1)
-	mockBroker := mock.NewBroker(t, responses)
-	mockExtra := mock.NewBroker(t, make(chan []byte))
+	mockBroker := NewMockBroker(t, responses)
+	mockExtra := NewMockBroker(t, make(chan []byte))
 	defer mockBroker.Close()
 	defer mockExtra.Close()
 
@@ -47,8 +46,8 @@ func TestClientExtraBrokers(t *testing.T) {
 
 func TestClientMetadata(t *testing.T) {
 	responses := make(chan []byte, 1)
-	mockBroker := mock.NewBroker(t, responses)
-	mockExtra := mock.NewBroker(t, make(chan []byte))
+	mockBroker := NewMockBroker(t, responses)
+	mockExtra := NewMockBroker(t, make(chan []byte))
 	defer mockBroker.Close()
 	defer mockExtra.Close()
 
@@ -95,8 +94,8 @@ func TestClientMetadata(t *testing.T) {
 
 func TestClientRefreshBehaviour(t *testing.T) {
 	responses := make(chan []byte, 3)
-	mockBroker := mock.NewBroker(t, responses)
-	mockExtra := mock.NewBroker(t, make(chan []byte))
+	mockBroker := NewMockBroker(t, responses)
+	mockExtra := NewMockBroker(t, make(chan []byte))
 	defer mockBroker.Close()
 	defer mockExtra.Close()
 

+ 5 - 11
kafka/consumer.go → consumer.go

@@ -1,11 +1,5 @@
 package kafka
 
-import k "sarama/protocol"
-import (
-	"sarama/encoding"
-	"sarama/types"
-)
-
 // Consumer processes Kafka messages from a given topic and partition.
 // You MUST call Close() on a consumer to avoid leaks, it will not be garbage-collected automatically when
 // it passes out of scope (this is in addition to calling Close on the underlying client, which is still necessary).
@@ -17,7 +11,7 @@ type Consumer struct {
 	group     string
 
 	offset        int64
-	broker        *k.Broker
+	broker        *Broker
 	stopper, done chan bool
 	messages      chan *Message
 	errors        chan error
@@ -93,7 +87,7 @@ func (c *Consumer) fetchMessages() {
 	var fetchSize int32 = 1024
 
 	for {
-		request := new(k.FetchRequest)
+		request := new(FetchRequest)
 		request.MinBytes = 1
 		request.MaxWaitTime = 1000
 		request.AddBlock(c.topic, c.partition, c.offset, fetchSize)
@@ -102,7 +96,7 @@ func (c *Consumer) fetchMessages() {
 		switch {
 		case err == nil:
 			break
-		case err == encoding.EncodingError:
+		case err == EncodingError:
 			if c.sendError(err) {
 				continue
 			} else {
@@ -127,9 +121,9 @@ func (c *Consumer) fetchMessages() {
 		}
 
 		switch block.Err {
-		case types.NO_ERROR:
+		case NO_ERROR:
 			break
-		case types.UNKNOWN_TOPIC_OR_PARTITION, types.NOT_LEADER_FOR_PARTITION, types.LEADER_NOT_AVAILABLE:
+		case UNKNOWN_TOPIC_OR_PARTITION, NOT_LEADER_FOR_PARTITION, LEADER_NOT_AVAILABLE:
 			err = c.client.refreshTopic(c.topic)
 			if c.sendError(err) {
 				for c.broker = nil; err != nil; c.broker, err = c.client.leader(c.topic, c.partition) {

+ 2 - 3
kafka/consumer_test.go → consumer_test.go

@@ -3,7 +3,6 @@ package kafka
 import (
 	"encoding/binary"
 	"fmt"
-	"sarama/mock"
 	"testing"
 	"time"
 )
@@ -11,8 +10,8 @@ import (
 func TestSimpleConsumer(t *testing.T) {
 	masterResponses := make(chan []byte, 1)
 	extraResponses := make(chan []byte)
-	mockBroker := mock.NewBroker(t, masterResponses)
-	mockExtra := mock.NewBroker(t, extraResponses)
+	mockBroker := NewMockBroker(t, masterResponses)
+	mockExtra := NewMockBroker(t, extraResponses)
 	defer mockBroker.Close()
 	defer mockExtra.Close()
 

+ 15 - 0
errors.go

@@ -2,6 +2,21 @@ package kafka
 
 import "errors"
 
+// OutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var OutOfBrokers = errors.New("kafka: Client has run out of available brokers to talk to. Is your cluster reachable?")
+
+// NoSuchTopic is the error returned when the supplied topic is rejected by the Kafka servers.
+var NoSuchTopic = errors.New("kafka: Topic not recognized by brokers.")
+
+// IncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var IncompleteResponse = errors.New("kafka: Response did not contain all the expected topic/partition blocks.")
+
+// InvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var InvalidPartition = errors.New("kafka: Partitioner returned an invalid partition index.")
+
 // AlreadyConnected is the error returned when calling Connect() on a Broker that is already connected.
 var AlreadyConnected = errors.New("kafka: broker: already connected")
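
With kafka/errors.go folded in (its deletion appears next), callers compare against these sentinel values directly on the package. A hedged sketch; doKafkaThing and the import path are stand-ins for illustration, not APIs from this commit:

package main

import (
	"fmt"

	kafka "sarama" // illustrative import path, as above
)

// doKafkaThing is a hypothetical stand-in for any client or producer call
// that can surface the package's sentinel errors.
func doKafkaThing() error { return kafka.OutOfBrokers }

func main() {
	switch err := doKafkaThing(); err {
	case nil:
		fmt.Println("ok")
	case kafka.OutOfBrokers:
		fmt.Println("every known broker failed; is the cluster reachable?")
	case kafka.NoSuchTopic:
		fmt.Println("the brokers did not recognize the topic")
	default:
		fmt.Println("unexpected error:", err)
	}
}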
 

+ 0 - 18
kafka/errors.go

@@ -1,18 +0,0 @@
-package kafka
-
-import "errors"
-
-// OutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
-// or otherwise failed to respond.
-var OutOfBrokers = errors.New("kafka: Client has run out of available brokers to talk to. Is your cluster reachable?")
-
-// NoSuchTopic is the error returned when the supplied topic is rejected by the Kafka servers.
-var NoSuchTopic = errors.New("kafka: Topic not recognized by brokers.")
-
-// IncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
-// not contain the expected information.
-var IncompleteResponse = errors.New("kafka: Response did not contain all the expected topic/partition blocks.")
-
-// InvalidPartition is the error returned when a partitioner returns an invalid partition index
-// (meaning one outside of the range [0...numPartitions-1]).
-var InvalidPartition = errors.New("kafka: Partitioner returned an invalid partition index.")

+ 4 - 3
message.go

@@ -20,9 +20,10 @@ const (
 const message_format int8 = 0
 
 type Message struct {
-	Codec CompressionCodec // codec used to compress the message contents
-	Key   []byte           // the message key, may be nil
-	Value []byte           // the message contents
+	Codec  CompressionCodec // codec used to compress the message contents
+	Key    []byte           // the message key, may be nil
+	Value  []byte           // the message contents
+	Offset int64            // the offset of this message (not usually set)
 }
 
 func (m *Message) encode(pe packetEncoder) error {
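
Message gains an Offset field here because the separate high-level Message type in utils.go (removed at the bottom of this diff) is being merged into the wire-level one. A small illustrative sketch of building a message with the merged struct:

package main

import kafka "sarama" // illustrative import path, as above

func main() {
	// Key may be nil; Offset is left at its zero value here since, per the
	// field comment in the diff, it is "not usually set" by callers.
	msg := &kafka.Message{
		Key:   []byte("user-42"),
		Value: []byte("payload"),
	}
	_ = msg
}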

+ 0 - 0
kafka/partitioner.go → partitioner.go


+ 0 - 0
kafka/partitioner_test.go → partitioner_test.go


+ 5 - 11
kafka/producer.go → producer.go

@@ -1,11 +1,5 @@
 package kafka
 
-import k "sarama/protocol"
-import (
-	"sarama/encoding"
-	"sarama/types"
-)
-
 // Producer publishes Kafka messages on a given topic. It routes messages to the correct broker, refreshing metadata as appropriate,
 // and parses responses for errors. A Producer itself does not need to be closed (thus no Close method) but you still need to close
 // its underlying Client.
@@ -78,14 +72,14 @@ func (p *Producer) safeSendMessage(key, value Encoder, retry bool) error {
 		return err
 	}
 
-	request := &k.ProduceRequest{RequiredAcks: types.WAIT_FOR_LOCAL, Timeout: 0}
-	request.AddMessage(p.topic, partition, &k.Message{Key: keyBytes, Value: valBytes})
+	request := &ProduceRequest{RequiredAcks: WAIT_FOR_LOCAL, Timeout: 0}
+	request.AddMessage(p.topic, partition, &Message{Key: keyBytes, Value: valBytes})
 
 	response, err := broker.Produce(p.client.id, request)
 	switch err {
 	case nil:
 		break
-	case encoding.EncodingError:
+	case EncodingError:
 		return err
 	default:
 		if !retry {
@@ -105,9 +99,9 @@ func (p *Producer) safeSendMessage(key, value Encoder, retry bool) error {
 	}
 
 	switch block.Err {
-	case types.NO_ERROR:
+	case NO_ERROR:
 		return nil
-	case types.UNKNOWN_TOPIC_OR_PARTITION, types.NOT_LEADER_FOR_PARTITION, types.LEADER_NOT_AVAILABLE:
+	case UNKNOWN_TOPIC_OR_PARTITION, NOT_LEADER_FOR_PARTITION, LEADER_NOT_AVAILABLE:
 		if !retry {
 			return block.Err
 		}

+ 2 - 3
kafka/producer_test.go → producer_test.go

@@ -3,15 +3,14 @@ package kafka
 import (
 	"encoding/binary"
 	"fmt"
-	"sarama/mock"
 	"testing"
 )
 
 func TestSimpleProducer(t *testing.T) {
 	responses := make(chan []byte, 1)
 	extraResponses := make(chan []byte)
-	mockBroker := mock.NewBroker(t, responses)
-	mockExtra := mock.NewBroker(t, extraResponses)
+	mockBroker := NewMockBroker(t, responses)
+	mockExtra := NewMockBroker(t, extraResponses)
 	defer mockBroker.Close()
 	defer mockExtra.Close()
 

+ 0 - 11
kafka/utils.go → utils.go

@@ -39,14 +39,3 @@ type ByteEncoder []byte
 func (b ByteEncoder) Encode() ([]byte, error) {
 	return b, nil
 }
-
-// create a message struct to return from high-level fetch requests
-// we could in theory use sarama/protocol/message.go but that has to match the
-// wire protocol, which doesn't quite line up with what we actually need to return
-
-// Message is what is returned from fetch requests.
-type Message struct {
-	Offset int64
-	Key    []byte
-	Value  []byte
-}