Merge pull request #31 from aybabtme/master

Fix `golint` and `go vet` errors, remove a couple of magic numbers, and flatten nested ifs.
Evan Huus 12 years ago
parent
commit
f39d3ce036
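
As a reader's aid, here is a minimal, self-contained sketch (hypothetical names, not code from this diff) of the two mechanical changes the commit message describes: mixedCaps identifiers in place of snake_case, and dropping the `else` when the preceding branch already returns.

```go
package sketch

import (
	"errors"
	"net"
)

var errNotConnected = errors.New("not connected")

// broker is a stand-in for the real type; the fields mirror the renames.
type broker struct {
	conn          net.Conn
	connErr       error // was conn_err
	correlationID int32 // was correlation_id
}

// nextCorrelationID shows the flattened form golint prefers: no else after
// a branch that returns.
func (b *broker) nextCorrelationID() (int32, error) {
	if b.conn == nil {
		if b.connErr != nil {
			return 0, b.connErr
		}
		return 0, errNotConnected // previously sat inside an else block
	}
	b.correlationID++
	return b.correlationID, nil
}
```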

+ 21 - 24
broker.go

@@ -12,28 +12,25 @@ type Broker struct {
 	id   int32
 	addr string
 
-	correlation_id int32
-	conn           net.Conn
-	conn_err       error
-	lock           sync.Mutex
+	correlationID int32
+	conn          net.Conn
+	connErr       error
+	lock          sync.Mutex
 
 	responses chan responsePromise
 	done      chan bool
 }
 
 type responsePromise struct {
-	correlation_id int32
-	packets        chan []byte
-	errors         chan error
+	correlationID int32
+	packets       chan []byte
+	errors        chan error
 }
 
 // NewBroker creates and returns a Broker targeting the given host:port address.
 // This does not attempt to actually connect, you have to call Open() for that.
 func NewBroker(addr string) *Broker {
-	b := new(Broker)
-	b.id = -1 // don't know it yet
-	b.addr = addr
-	return b
+	return &Broker{id: -1, addr: addr}
 }
 
 // Open tries to connect to the Broker. It takes the broker lock synchronously, then spawns a goroutine which
@@ -51,8 +48,8 @@ func (b *Broker) Open() error {
 	go func() {
 		defer b.lock.Unlock()
 
-		b.conn, b.conn_err = net.Dial("tcp", b.addr)
-		if b.conn_err != nil {
+		b.conn, b.connErr = net.Dial("tcp", b.addr)
+		if b.connErr != nil {
 			return
 		}
 
@@ -73,7 +70,7 @@ func (b *Broker) Connected() (bool, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 
-	return b.conn != nil, b.conn_err
+	return b.conn != nil, b.connErr
 }
 
 func (b *Broker) Close() error {
@@ -90,7 +87,7 @@ func (b *Broker) Close() error {
 	err := b.conn.Close()
 
 	b.conn = nil
-	b.conn_err = nil
+	b.connErr = nil
 	b.done = nil
 	b.responses = nil
 
@@ -135,7 +132,7 @@ func (b *Broker) Produce(clientID string, request *ProduceRequest) (*ProduceResp
 	var response *ProduceResponse
 	var err error
 
-	if request.RequiredAcks == NO_RESPONSE {
+	if request.RequiredAcks == NoResponse {
 		err = b.sendAndReceive(clientID, request, nil)
 	} else {
 		response = new(ProduceResponse)
@@ -190,14 +187,13 @@ func (b *Broker) send(clientID string, req requestEncoder, promiseResponse bool)
 	defer b.lock.Unlock()
 
 	if b.conn == nil {
-		if b.conn_err != nil {
-			return nil, b.conn_err
-		} else {
-			return nil, NotConnected
+		if b.connErr != nil {
+			return nil, b.connErr
 		}
+		return nil, NotConnected
 	}
 
-	fullRequest := request{b.correlation_id, clientID, req}
+	fullRequest := request{b.correlationID, clientID, req}
 	buf, err := encode(&fullRequest)
 	if err != nil {
 		return nil, err
@@ -207,13 +203,13 @@ func (b *Broker) send(clientID string, req requestEncoder, promiseResponse bool)
 	if err != nil {
 		return nil, err
 	}
-	b.correlation_id++
+	b.correlationID++
 
 	if !promiseResponse {
 		return nil, nil
 	}
 
-	promise := responsePromise{fullRequest.correlation_id, make(chan []byte), make(chan error)}
+	promise := responsePromise{fullRequest.correlationID, make(chan []byte), make(chan error)}
 	b.responses <- promise
 
 	return &promise, nil
@@ -237,6 +233,7 @@ func (b *Broker) sendAndReceive(clientID string, req requestEncoder, res decoder
 		return err
 	}
 
+	// For backward compatibility with go1.0
 	return nil
 }
 
@@ -276,7 +273,7 @@ func (b *Broker) responseReceiver() {
 			response.errors <- err
 			continue
 		}
-		if decodedHeader.correlation_id != response.correlation_id {
+		if decodedHeader.correlationID != response.correlationID {
 			response.errors <- DecodingError
 			continue
 		}

+ 4 - 4
broker_test.go

@@ -225,26 +225,26 @@ var brokerTestTable = []struct {
 	{[]byte{},
 		func(t *testing.T, broker *Broker) {
 			request := ProduceRequest{}
-			request.RequiredAcks = NO_RESPONSE
+			request.RequiredAcks = NoResponse
 			response, err := broker.Produce("clientID", &request)
 			if err != nil {
 				t.Error(err)
 			}
 			if response != nil {
-				t.Error("Produce request with NO_RESPONSE got a response!")
+				t.Error("Produce request with NoResponse got a response!")
 			}
 		}},
 
 	{[]byte{0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := ProduceRequest{}
-			request.RequiredAcks = WAIT_FOR_LOCAL
+			request.RequiredAcks = WaitForLocal
 			response, err := broker.Produce("clientID", &request)
 			if err != nil {
 				t.Error(err)
 			}
 			if response == nil {
-				t.Error("Produce request without NO_RESPONSE got no response!")
+				t.Error("Produce request without NoResponse got no response!")
 			}
 		}},
 

+ 27 - 27
client.go

@@ -34,7 +34,7 @@ type Client struct {
 // NewClient creates a new Client with the given client ID. It connects to one of the given broker addresses
 // and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
 // be retrieved from any of the given broker addresses, the client is not created.
-func NewClient(id string, addrs []string, config *ClientConfig) (client *Client, err error) {
+func NewClient(id string, addrs []string, config *ClientConfig) (*Client, error) {
 	if config == nil {
 		config = new(ClientConfig)
 	}
@@ -47,18 +47,18 @@ func NewClient(id string, addrs []string, config *ClientConfig) (client *Client,
 		return nil, ConfigurationError("You must provide at least one broker address")
 	}
 
-	client = new(Client)
-	client.id = id
-	client.config = *config
-	client.extraBrokerAddrs = addrs
-	client.extraBroker = NewBroker(client.extraBrokerAddrs[0])
+	client := &Client{
+		id:               id,
+		config:           *config,
+		extraBrokerAddrs: addrs,
+		extraBroker:      NewBroker(addrs[0]),
+		brokers:          make(map[int32]*Broker),
+		leaders:          make(map[string]map[int32]int32),
+	}
 	client.extraBroker.Open()
 
-	client.brokers = make(map[int32]*Broker)
-	client.leaders = make(map[string]map[int32]int32)
-
 	// do an initial fetch of all cluster metadata by specifing an empty list of topics
-	err = client.RefreshAllMetadata()
+	err := client.RefreshAllMetadata()
 	if err != nil {
 		client.Close() // this closes tmp, since it's still in the brokers hash
 		return nil, err
@@ -112,7 +112,7 @@ func (client *Client) Topics() ([]string, error) {
 	defer client.lock.RUnlock()
 
 	ret := make([]string, 0, len(client.leaders))
-	for topic, _ := range client.leaders {
+	for topic := range client.leaders {
 		ret = append(ret, topic)
 	}
 
@@ -121,19 +121,19 @@ func (client *Client) Topics() ([]string, error) {
 
 // Leader returns the broker object that is the leader of the current topic/partition, as
 // determined by querying the cluster metadata.
-func (client *Client) Leader(topic string, partition_id int32) (*Broker, error) {
-	leader := client.cachedLeader(topic, partition_id)
+func (client *Client) Leader(topic string, partitionID int32) (*Broker, error) {
+	leader := client.cachedLeader(topic, partitionID)
 
 	if leader == nil {
 		err := client.RefreshTopicMetadata(topic)
 		if err != nil {
 			return nil, err
 		}
-		leader = client.cachedLeader(topic, partition_id)
+		leader = client.cachedLeader(topic, partitionID)
 	}
 
 	if leader == nil {
-		return nil, UNKNOWN_TOPIC_OR_PARTITION
+		return nil, UnknownTopicOrPartition
 	}
 
 	return leader, nil
@@ -193,7 +193,7 @@ func (client *Client) refreshMetadata(topics []string, retries int) error {
 				return nil
 			default:
 				if retries <= 0 {
-					return LEADER_NOT_AVAILABLE
+					return LeaderNotAvailable
 				}
 				time.Sleep(client.config.WaitForElection) // wait for leader election
 				return client.refreshMetadata(retry, retries-1)
@@ -221,13 +221,13 @@ func (client *Client) any() *Broker {
 	return client.extraBroker
 }
 
-func (client *Client) cachedLeader(topic string, partition_id int32) *Broker {
+func (client *Client) cachedLeader(topic string, partitionID int32) *Broker {
 	client.lock.RLock()
 	defer client.lock.RUnlock()
 
 	partitions := client.leaders[topic]
 	if partitions != nil {
-		leader, ok := partitions[partition_id]
+		leader, ok := partitions[partitionID]
 		if ok {
 			return client.brokers[leader]
 		}
@@ -246,7 +246,7 @@ func (client *Client) cachedPartitions(topic string) []int32 {
 	}
 
 	ret := make([]int32, 0, len(partitions))
-	for id, _ := range partitions {
+	for id := range partitions {
 		ret = append(ret, id)
 	}
 
@@ -254,7 +254,7 @@ func (client *Client) cachedPartitions(topic string) []int32 {
 	return ret
 }
 
-// if no fatal error, returns a list of topics that need retrying due to LEADER_NOT_AVAILABLE
+// if no fatal error, returns a list of topics that need retrying due to LeaderNotAvailable
 func (client *Client) update(data *MetadataResponse) ([]string, error) {
 	client.lock.Lock()
 	defer client.lock.Unlock()
@@ -281,9 +281,9 @@ func (client *Client) update(data *MetadataResponse) ([]string, error) {
 
 	for _, topic := range data.Topics {
 		switch topic.Err {
-		case NO_ERROR:
+		case NoError:
 			break
-		case LEADER_NOT_AVAILABLE:
+		case LeaderNotAvailable:
 			toRetry[topic.Name] = true
 		default:
 			return nil, topic.Err
@@ -291,11 +291,11 @@ func (client *Client) update(data *MetadataResponse) ([]string, error) {
 		client.leaders[topic.Name] = make(map[int32]int32, len(topic.Partitions))
 		for _, partition := range topic.Partitions {
 			switch partition.Err {
-			case LEADER_NOT_AVAILABLE:
+			case LeaderNotAvailable:
 				toRetry[topic.Name] = true
-				delete(client.leaders[topic.Name], partition.Id)
-			case NO_ERROR:
-				client.leaders[topic.Name][partition.Id] = partition.Leader
+				delete(client.leaders[topic.Name], partition.ID)
+			case NoError:
+				client.leaders[topic.Name][partition.ID] = partition.Leader
 			default:
 				return nil, partition.Err
 			}
@@ -303,7 +303,7 @@ func (client *Client) update(data *MetadataResponse) ([]string, error) {
 	}
 
 	ret := make([]string, 0, len(toRetry))
-	for topic, _ := range toRetry {
+	for topic := range toRetry {
 		ret = append(ret, topic)
 	}
 	return ret, nil
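
A hedged usage sketch of the renamed Client API, assuming the historical `github.com/Shopify/sarama` import path; the address and topic are placeholders, not taken from this diff:

```go
package main

import "github.com/Shopify/sarama"

func main() {
	// NewClient signature as in this diff: id, broker addresses, optional config (nil is allowed).
	client, err := sarama.NewClient("example-client", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Leader now takes a partitionID int32 (was partition_id).
	leader, err := client.Leader("my_topic", 0)
	if err != nil {
		panic(err)
	}
	_ = leader
}
```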

+ 28 - 27
consumer.go

@@ -4,15 +4,15 @@ package sarama
 type OffsetMethod int
 
 const (
-	// OFFSET_METHOD_MANUAL causes the consumer to interpret the OffsetValue in the ConsumerConfig as the
+	// OffsetMethodManual causes the consumer to interpret the OffsetValue in the ConsumerConfig as the
 	// offset at which to start, allowing the user to manually specify their desired starting offset.
-	OFFSET_METHOD_MANUAL OffsetMethod = iota
-	// OFFSET_METHOD_NEWEST causes the consumer to start at the most recent available offset, as
+	OffsetMethodManual OffsetMethod = iota
+	// OffsetMethodNewest causes the consumer to start at the most recent available offset, as
 	// determined by querying the broker.
-	OFFSET_METHOD_NEWEST
-	// OFFSET_METHOD_OLDEST causes the consumer to start at the oldest available offset, as
+	OffsetMethodNewest
+	// OffsetMethodOldest causes the consumer to start at the oldest available offset, as
 	// determined by querying the broker.
-	OFFSET_METHOD_OLDEST
+	OffsetMethodOldest
 )
 
 // ConsumerConfig is used to pass multiple configuration options to NewConsumer.
@@ -101,27 +101,31 @@ func NewConsumer(client *Client, topic string, partition int32, group string, co
 		return nil, err
 	}
 
-	c := new(Consumer)
-	c.client = client
-	c.topic = topic
-	c.partition = partition
-	c.group = group
-	c.config = *config
-	c.broker = broker
+	c := &Consumer{
+		client:    client,
+		topic:     topic,
+		partition: partition,
+		group:     group,
+		config:    *config,
+		broker:    broker,
+		stopper:   make(chan bool),
+		done:      make(chan bool),
+		events:    make(chan *ConsumerEvent, config.EventBufferSize),
+	}
 
 	switch config.OffsetMethod {
-	case OFFSET_METHOD_MANUAL:
+	case OffsetMethodManual:
 		if config.OffsetValue < 0 {
 			return nil, ConfigurationError("OffsetValue cannot be < 0 when OffsetMethod is MANUAL")
 		}
 		c.offset = config.OffsetValue
-	case OFFSET_METHOD_NEWEST:
-		c.offset, err = c.getOffset(LATEST_OFFSETS, true)
+	case OffsetMethodNewest:
+		c.offset, err = c.getOffset(LatestOffsets, true)
 		if err != nil {
 			return nil, err
 		}
-	case OFFSET_METHOD_OLDEST:
-		c.offset, err = c.getOffset(EARLIEST_OFFSET, true)
+	case OffsetMethodOldest:
+		c.offset, err = c.getOffset(EarliestOffset, true)
 		if err != nil {
 			return nil, err
 		}
@@ -129,10 +133,6 @@ func NewConsumer(client *Client, topic string, partition int32, group string, co
 		return nil, ConfigurationError("Invalid OffsetMethod")
 	}
 
-	c.stopper = make(chan bool)
-	c.done = make(chan bool)
-	c.events = make(chan *ConsumerEvent, config.EventBufferSize)
-
 	go c.fetchMessages()
 
 	return c, nil
@@ -169,12 +169,13 @@ func (c *Consumer) sendError(err error) bool {
 		return true
 	}
 
+	// For backward compatibility with go1.0
 	return true
 }
 
 func (c *Consumer) fetchMessages() {
 
-	var fetchSize int32 = c.config.DefaultFetchSize
+	fetchSize := c.config.DefaultFetchSize
 
 	for {
 		request := new(FetchRequest)
@@ -212,9 +213,9 @@ func (c *Consumer) fetchMessages() {
 		}
 
 		switch block.Err {
-		case NO_ERROR:
+		case NoError:
 			break
-		case UNKNOWN_TOPIC_OR_PARTITION, NOT_LEADER_FOR_PARTITION, LEADER_NOT_AVAILABLE:
+		case UnknownTopicOrPartition, NotLeaderForPartition, LeaderNotAvailable:
 			err = c.client.RefreshTopicMetadata(c.topic)
 			if c.sendError(err) {
 				for c.broker, err = c.client.Leader(c.topic, c.partition); err != nil; c.broker, err = c.client.Leader(c.topic, c.partition) {
@@ -308,12 +309,12 @@ func (c *Consumer) getOffset(where OffsetTime, retry bool) (int64, error) {
 	}
 
 	switch block.Err {
-	case NO_ERROR:
+	case NoError:
 		if len(block.Offsets) < 1 {
 			return -1, IncompleteResponse
 		}
 		return block.Offsets[0], nil
-	case UNKNOWN_TOPIC_OR_PARTITION, NOT_LEADER_FOR_PARTITION, LEADER_NOT_AVAILABLE:
+	case UnknownTopicOrPartition, NotLeaderForPartition, LeaderNotAvailable:
 		if !retry {
 			return -1, block.Err
 		}

+ 2 - 2
consumer_test.go

@@ -116,7 +116,7 @@ func TestConsumerRawOffset(t *testing.T) {
 	}
 	defer client.Close()
 
-	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", &ConsumerConfig{OffsetMethod: OFFSET_METHOD_MANUAL, OffsetValue: 1234})
+	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", &ConsumerConfig{OffsetMethod: OffsetMethodManual, OffsetValue: 1234})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -157,7 +157,7 @@ func TestConsumerLatestOffset(t *testing.T) {
 	}
 	defer client.Close()
 
-	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", &ConsumerConfig{OffsetMethod: OFFSET_METHOD_NEWEST})
+	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", &ConsumerConfig{OffsetMethod: OffsetMethodNewest})
 	if err != nil {
 		t.Fatal(err)
 	}

+ 34 - 30
errors.go

@@ -1,6 +1,9 @@
 package sarama
 
-import "errors"
+import (
+	"errors"
+	"fmt"
+)
 
 // OutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
 // or otherwise failed to respond.
@@ -51,56 +54,57 @@ func (err ConfigurationError) Error() string {
 // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
 type KError int16
 
+// Numeric error codes returned by the Kafka server.
 const (
-	NO_ERROR                    KError = 0
-	UNKNOWN                     KError = -1
-	OFFSET_OUT_OF_RANGE         KError = 1
-	INVALID_MESSAGE             KError = 2
-	UNKNOWN_TOPIC_OR_PARTITION  KError = 3
-	INVALID_MESSAGE_SIZE        KError = 4
-	LEADER_NOT_AVAILABLE        KError = 5
-	NOT_LEADER_FOR_PARTITION    KError = 6
-	REQUEST_TIMED_OUT           KError = 7
-	BROKER_NOT_AVAILABLE        KError = 8
-	REPLICA_NOT_AVAILABLE       KError = 9
-	MESSAGE_SIZE_TOO_LARGE      KError = 10
-	STALE_CONTROLLER_EPOCH_CODE KError = 11
-	OFFSET_METADATA_TOO_LARGE   KError = 12
+	NoError                  KError = 0
+	Unknown                  KError = -1
+	OffsetOutOfRange         KError = 1
+	InvalidMessage           KError = 2
+	UnknownTopicOrPartition  KError = 3
+	InvalidMessageSize       KError = 4
+	LeaderNotAvailable       KError = 5
+	NotLeaderForPartition    KError = 6
+	RequestTimedOut          KError = 7
+	BrokerNotAvailable       KError = 8
+	ReplicaNotAvailable      KError = 9
+	MessageSizeTooLarge      KError = 10
+	StaleControllerEpochCode KError = 11
+	OffsetMetadataTooLarge   KError = 12
 )
 
 func (err KError) Error() string {
 	// Error messages stolen/adapted from
 	// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
 	switch err {
-	case NO_ERROR:
+	case NoError:
 		return "kafka server: Not an error, why are you printing me?"
-	case UNKNOWN:
+	case Unknown:
 		return "kafka server: Unexpected (unknown?) server error."
-	case OFFSET_OUT_OF_RANGE:
+	case OffsetOutOfRange:
 		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
-	case INVALID_MESSAGE:
+	case InvalidMessage:
 		return "kafka server: Message contents does not match its CRC."
-	case UNKNOWN_TOPIC_OR_PARTITION:
+	case UnknownTopicOrPartition:
 		return "kafka server: Request was for a topic or partition that does not exist on this broker."
-	case INVALID_MESSAGE_SIZE:
+	case InvalidMessageSize:
 		return "kafka server: The message has a negative size."
-	case LEADER_NOT_AVAILABLE:
+	case LeaderNotAvailable:
 		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
-	case NOT_LEADER_FOR_PARTITION:
+	case NotLeaderForPartition:
 		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
-	case REQUEST_TIMED_OUT:
+	case RequestTimedOut:
 		return "kafka server: Request exceeded the user-specified time limit in the request."
-	case BROKER_NOT_AVAILABLE:
+	case BrokerNotAvailable:
 		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
-	case REPLICA_NOT_AVAILABLE:
+	case ReplicaNotAvailable:
 		return "kafka server: Replica not available. What is the difference between this and LeaderNotAvailable?"
-	case MESSAGE_SIZE_TOO_LARGE:
+	case MessageSizeTooLarge:
 		return "kafka server: Message was too large, server rejected it to avoid allocation error."
-	case STALE_CONTROLLER_EPOCH_CODE:
+	case StaleControllerEpochCode:
 		return "kafka server: Stale controller epoch code. ???"
-	case OFFSET_METADATA_TOO_LARGE:
+	case OffsetMetadataTooLarge:
 		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
 	}
 
-	return "Unknown error, how did this happen?"
+	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
 }
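
The one behavioural change in errors.go is the fallback string: an unrecognized code now reports its numeric value. A small sketch of just that branch (the message text comes from the diff; the standalone helper is illustrative):

```go
package sketch

import "fmt"

// KError is the Kafka numeric error-code type from errors.go.
type KError int16

// unknownErrorString mirrors the new fallback in KError.Error().
func unknownErrorString(err KError) string {
	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}

// unknownErrorString(42) == "Unknown error, how did this happen? Error code = 42"
```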

+ 2 - 2
fetch_request.go

@@ -53,7 +53,7 @@ func (f *FetchRequest) version() int16 {
 	return 0
 }
 
-func (f *FetchRequest) AddBlock(topic string, partition_id int32, fetchOffset int64, maxBytes int32) {
+func (f *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
 	if f.blocks == nil {
 		f.blocks = make(map[string]map[int32]*fetchRequestBlock)
 	}
@@ -66,5 +66,5 @@ func (f *FetchRequest) AddBlock(topic string, partition_id int32, fetchOffset in
 	tmp.maxBytes = maxBytes
 	tmp.fetchOffset = fetchOffset
 
-	f.blocks[topic][partition_id] = tmp
+	f.blocks[topic][partitionID] = tmp
 }

+ 38 - 39
fetch_response_test.go

@@ -42,44 +42,43 @@ func TestOneMessageFetchResponse(t *testing.T) {
 	response := FetchResponse{}
 	testDecodable(t, "one message", &response, oneMessageFetchResponse)
 
-	if len(response.Blocks) == 1 {
-		if len(response.Blocks["topic"]) == 1 {
-			block := response.GetBlock("topic", 5)
-			if block != nil {
-				if block.Err != OFFSET_OUT_OF_RANGE {
-					t.Error("Decoding didn't produce correct error code.")
-				}
-				if block.HighWaterMarkOffset != 0x10101010 {
-					t.Error("Decoding didn't produce correct high water mark offset.")
-				}
-				if block.MsgSet.PartialTrailingMessage {
-					t.Error("Decoding detected a partial trailing message where there wasn't one.")
-				}
-				if len(block.MsgSet.Messages) == 1 {
-					msgBlock := block.MsgSet.Messages[0]
-					if msgBlock.Offset != 0x550000 {
-						t.Error("Decoding produced incorrect message offset.")
-					}
-					msg := msgBlock.Msg
-					if msg.Codec != COMPRESSION_NONE {
-						t.Error("Decoding produced incorrect message compression.")
-					}
-					if msg.Key != nil {
-						t.Error("Decoding produced message key where there was none.")
-					}
-					if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
-						t.Error("Decoding produced incorrect message value.")
-					}
-				} else {
-					t.Error("Decoding produced incorrect number of messages.")
-				}
-			} else {
-				t.Error("GetBlock didn't return block.")
-			}
-		} else {
-			t.Error("Decoding produced incorrect number of partition blocks for topic.")
-		}
-	} else {
-		t.Error("Decoding produced incorrect number of topic blocks.")
+	if len(response.Blocks) != 1 {
+		t.Fatal("Decoding produced incorrect number of topic blocks.")
+	}
+
+	if len(response.Blocks["topic"]) != 1 {
+		t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
+	}
+
+	block := response.GetBlock("topic", 5)
+	if block == nil {
+		t.Fatal("GetBlock didn't return block.")
+	}
+	if block.Err != OffsetOutOfRange {
+		t.Error("Decoding didn't produce correct error code.")
+	}
+	if block.HighWaterMarkOffset != 0x10101010 {
+		t.Error("Decoding didn't produce correct high water mark offset.")
+	}
+	if block.MsgSet.PartialTrailingMessage {
+		t.Error("Decoding detected a partial trailing message where there wasn't one.")
+	}
+
+	if len(block.MsgSet.Messages) != 1 {
+		t.Fatal("Decoding produced incorrect number of messages.")
+	}
+	msgBlock := block.MsgSet.Messages[0]
+	if msgBlock.Offset != 0x550000 {
+		t.Error("Decoding produced incorrect message offset.")
+	}
+	msg := msgBlock.Msg
+	if msg.Codec != CompressionNone {
+		t.Error("Decoding produced incorrect message compression.")
+	}
+	if msg.Key != nil {
+		t.Error("Decoding produced message key where there was none.")
+	}
+	if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
+		t.Error("Decoding produced incorrect message value.")
 	}
 }
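
Each test file in this PR is rewritten in the same guard-clause style: `t.Fatal` where continuing would dereference nil or index out of range, `t.Error` where later checks remain safe. A schematic sketch with hypothetical types:

```go
package sketch

import "testing"

type block struct{ Offsets []int64 }

type response struct{ Blocks map[string]*block }

func checkResponse(t *testing.T, r *response) {
	b := r.Blocks["topic"]
	if b == nil {
		t.Fatal("missing block for topic") // continuing would panic below
	}
	if len(b.Offsets) != 1 {
		t.Fatal("wrong number of offsets") // the index below would be out of range
	}
	if b.Offsets[0] != 5 {
		t.Error("wrong offset value") // safe to keep checking after this
	}
}
```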

+ 13 - 14
message.go

@@ -11,14 +11,14 @@ import (
 type CompressionCodec int8
 
 const (
-	COMPRESSION_NONE   CompressionCodec = 0
-	COMPRESSION_GZIP   CompressionCodec = 1
-	COMPRESSION_SNAPPY CompressionCodec = 2
+	CompressionNone   CompressionCodec = 0
+	CompressionGZIP   CompressionCodec = 1
+	CompressionSnappy CompressionCodec = 2
 )
 
 // The spec just says: "This is a version id used to allow backwards compatible evolution of the message
 // binary format." but it doesn't say what the current value is, so presumably 0...
-const message_format int8 = 0
+const messageFormat int8 = 0
 
 type Message struct {
 	Codec CompressionCodec // codec used to compress the message contents
@@ -31,10 +31,9 @@ type Message struct {
 func (m *Message) encode(pe packetEncoder) error {
 	pe.push(&crc32Field{})
 
-	pe.putInt8(message_format)
+	pe.putInt8(messageFormat)
 
-	var attributes int8 = 0
-	attributes |= int8(m.Codec) & 0x07
+	attributes := int8(m.Codec) & 0x07
 	pe.putInt8(attributes)
 
 	err := pe.putBytes(m.Key)
@@ -49,16 +48,16 @@ func (m *Message) encode(pe packetEncoder) error {
 		m.compressedCache = nil
 	} else {
 		switch m.Codec {
-		case COMPRESSION_NONE:
+		case CompressionNone:
 			payload = m.Value
-		case COMPRESSION_GZIP:
+		case CompressionGZIP:
 			var buf bytes.Buffer
 			writer := gzip.NewWriter(&buf)
 			writer.Write(m.Value)
 			writer.Close()
 			m.compressedCache = buf.Bytes()
 			payload = m.compressedCache
-		case COMPRESSION_SNAPPY:
+		case CompressionSnappy:
 			tmp, err := snappy.Encode(nil, m.Value)
 			if err != nil {
 				return err
@@ -88,7 +87,7 @@ func (m *Message) decode(pd packetDecoder) (err error) {
 	if err != nil {
 		return err
 	}
-	if format != message_format {
+	if format != messageFormat {
 		return DecodingError
 	}
 
@@ -109,9 +108,9 @@ func (m *Message) decode(pd packetDecoder) (err error) {
 	}
 
 	switch m.Codec {
-	case COMPRESSION_NONE:
+	case CompressionNone:
 		// nothing to do
-	case COMPRESSION_GZIP:
+	case CompressionGZIP:
 		if m.Value == nil {
 			return DecodingError
 		}
@@ -123,7 +122,7 @@ func (m *Message) decode(pd packetDecoder) (err error) {
 		if err != nil {
 			return err
 		}
-	case COMPRESSION_SNAPPY:
+	case CompressionSnappy:
 		if m.Value == nil {
 			return DecodingError
 		}

+ 3 - 3
message_test.go

@@ -27,14 +27,14 @@ func TestMessageEncoding(t *testing.T) {
 	testEncodable(t, "empty", &message, emptyMessage)
 
 	message.Value = []byte{}
-	message.Codec = COMPRESSION_GZIP
+	message.Codec = CompressionGZIP
 	testEncodable(t, "empty gzip", &message, emptyGzipMessage)
 }
 
 func TestMessageDecoding(t *testing.T) {
 	message := Message{}
 	testDecodable(t, "empty", &message, emptyMessage)
-	if message.Codec != COMPRESSION_NONE {
+	if message.Codec != CompressionNone {
 		t.Error("Decoding produced compression codec where there was none.")
 	}
 	if message.Key != nil {
@@ -45,7 +45,7 @@ func TestMessageDecoding(t *testing.T) {
 	}
 
 	testDecodable(t, "empty gzip", &message, emptyGzipMessage)
-	if message.Codec != COMPRESSION_GZIP {
+	if message.Codec != CompressionGZIP {
 		t.Error("Decoding produced incorrect compression codec (was gzip).")
 	}
 	if message.Key != nil {

+ 2 - 2
metadata_response.go

@@ -2,7 +2,7 @@ package sarama
 
 type PartitionMetadata struct {
 	Err      KError
-	Id       int32
+	ID       int32
 	Leader   int32
 	Replicas []int32
 	Isr      []int32
@@ -15,7 +15,7 @@ func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
 	}
 	pm.Err = KError(tmp)
 
-	pm.Id, err = pd.getInt32()
+	pm.ID, err = pd.getInt32()
 	if err != nil {
 		return err
 	}

+ 67 - 57
metadata_response_test.go

@@ -54,22 +54,23 @@ func TestMetadataResponseWithBrokers(t *testing.T) {
 	response := MetadataResponse{}
 
 	testDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse)
-	if len(response.Brokers) == 2 {
-		if response.Brokers[0].id != 0xabff {
-			t.Error("Decoding produced invalid broker 0 id.")
-		}
-		if response.Brokers[0].addr != "localhost:51" {
-			t.Error("Decoding produced invalid broker 0 address.")
-		}
-		if response.Brokers[1].id != 0x010203 {
-			t.Error("Decoding produced invalid broker 1 id.")
-		}
-		if response.Brokers[1].addr != "google.com:273" {
-			t.Error("Decoding produced invalid broker 1 address.")
-		}
-	} else {
-		t.Error("Decoding produced", len(response.Brokers), "brokers where there were two!")
+	if len(response.Brokers) != 2 {
+		t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
+	}
+
+	if response.Brokers[0].id != 0xabff {
+		t.Error("Decoding produced invalid broker 0 id.")
+	}
+	if response.Brokers[0].addr != "localhost:51" {
+		t.Error("Decoding produced invalid broker 0 address.")
+	}
+	if response.Brokers[1].id != 0x010203 {
+		t.Error("Decoding produced invalid broker 1 id.")
 	}
+	if response.Brokers[1].addr != "google.com:273" {
+		t.Error("Decoding produced invalid broker 1 address.")
+	}
+
 	if len(response.Topics) != 0 {
 		t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
 	}
@@ -82,48 +83,57 @@ func TestMetadataResponseWithTopics(t *testing.T) {
 	if len(response.Brokers) != 0 {
 		t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
 	}
-	if len(response.Topics) == 2 {
-		if response.Topics[0].Err != NO_ERROR {
-			t.Error("Decoding produced invalid topic 0 error.")
-		}
-		if response.Topics[0].Name != "foo" {
-			t.Error("Decoding produced invalid topic 0 name.")
-		}
-		if len(response.Topics[0].Partitions) == 1 {
-			if response.Topics[0].Partitions[0].Err != INVALID_MESSAGE_SIZE {
-				t.Error("Decoding produced invalid topic 0 partition 0 error.")
-			}
-			if response.Topics[0].Partitions[0].Id != 0x01 {
-				t.Error("Decoding produced invalid topic 0 partition 0 id.")
-			}
-			if response.Topics[0].Partitions[0].Leader != 0x07 {
-				t.Error("Decoding produced invalid topic 0 partition 0 leader.")
-			}
-			if len(response.Topics[0].Partitions[0].Replicas) == 3 {
-				for i := 0; i < 3; i++ {
-					if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
-						t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
-					}
-				}
-			} else {
-				t.Error("Decoding produced invalid topic 0 partition 0 replicas.")
-			}
-			if len(response.Topics[0].Partitions[0].Isr) != 0 {
-				t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
-			}
-		} else {
-			t.Error("Decoding produced invalid partition count for topic 0.")
-		}
-		if response.Topics[1].Err != NO_ERROR {
-			t.Error("Decoding produced invalid topic 1 error.")
-		}
-		if response.Topics[1].Name != "bar" {
-			t.Error("Decoding produced invalid topic 0 name.")
-		}
-		if len(response.Topics[1].Partitions) != 0 {
-			t.Error("Decoding produced invalid partition count for topic 1.")
+
+	if len(response.Topics) != 2 {
+		t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!")
+	}
+
+	if response.Topics[0].Err != NoError {
+		t.Error("Decoding produced invalid topic 0 error.")
+	}
+
+	if response.Topics[0].Name != "foo" {
+		t.Error("Decoding produced invalid topic 0 name.")
+	}
+
+	if len(response.Topics[0].Partitions) != 1 {
+		t.Fatal("Decoding produced invalid partition count for topic 0.")
+	}
+
+	if response.Topics[0].Partitions[0].Err != InvalidMessageSize {
+		t.Error("Decoding produced invalid topic 0 partition 0 error.")
+	}
+
+	if response.Topics[0].Partitions[0].ID != 0x01 {
+		t.Error("Decoding produced invalid topic 0 partition 0 id.")
+	}
+
+	if response.Topics[0].Partitions[0].Leader != 0x07 {
+		t.Error("Decoding produced invalid topic 0 partition 0 leader.")
+	}
+
+	if len(response.Topics[0].Partitions[0].Replicas) != 3 {
+		t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.")
+	}
+	for i := 0; i < 3; i++ {
+		if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
+			t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
 		}
-	} else {
-		t.Error("Decoding produced", len(response.Topics), "topics where there were two!")
+	}
+
+	if len(response.Topics[0].Partitions[0].Isr) != 0 {
+		t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
+	}
+
+	if response.Topics[1].Err != NoError {
+		t.Error("Decoding produced invalid topic 1 error.")
+	}
+
+	if response.Topics[1].Name != "bar" {
+		t.Error("Decoding produced invalid topic 0 name.")
+	}
+
+	if len(response.Topics[1].Partitions) != 0 {
+		t.Error("Decoding produced invalid partition count for topic 1.")
 	}
 }

+ 2 - 2
offset_commit_request.go

@@ -52,7 +52,7 @@ func (r *OffsetCommitRequest) version() int16 {
 	return 0
 }
 
-func (r *OffsetCommitRequest) AddBlock(topic string, partition_id int32, offset int64, metadata string) {
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, metadata string) {
 	if r.blocks == nil {
 		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
 	}
@@ -65,5 +65,5 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partition_id int32, offset
 	tmp.offset = offset
 	tmp.metadata = metadata
 
-	r.blocks[topic][partition_id] = tmp
+	r.blocks[topic][partitionID] = tmp
 }

+ 16 - 13
offset_commit_response_test.go

@@ -39,18 +39,21 @@ func TestNormalOffsetCommitResponse(t *testing.T) {
 	if response.ClientID != "az" {
 		t.Error("Decoding produced wrong client ID.")
 	}
-	if len(response.Errors) == 2 {
-		if len(response.Errors["m"]) != 0 {
-			t.Error("Decoding produced errors for topic 'm' where there were none.")
-		}
-		if len(response.Errors["t"]) == 1 {
-			if response.Errors["t"][0] != NOT_LEADER_FOR_PARTITION {
-				t.Error("Decoding produced wrong error for topic 't' partition 0.")
-			}
-		} else {
-			t.Error("Decoding produced wrong number of errors for topic 't'.")
-		}
-	} else {
-		t.Error("Decoding produced wrong number of errors.")
+
+	if len(response.Errors) != 2 {
+		t.Fatal("Decoding produced wrong number of errors.")
+	}
+
+	if len(response.Errors["m"]) != 0 {
+		t.Error("Decoding produced errors for topic 'm' where there were none.")
+	}
+
+	if len(response.Errors["t"]) != 1 {
+		t.Fatal("Decoding produced wrong number of errors for topic 't'.")
+	}
+
+	if response.Errors["t"][0] != NotLeaderForPartition {
+		t.Error("Decoding produced wrong error for topic 't' partition 0.")
 	}
+
 }

+ 2 - 2
offset_fetch_request.go

@@ -32,10 +32,10 @@ func (r *OffsetFetchRequest) version() int16 {
 	return 0
 }
 
-func (r *OffsetFetchRequest) AddPartition(topic string, partition_id int32) {
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
 	if r.partitions == nil {
 		r.partitions = make(map[string][]int32)
 	}
 
-	r.partitions[topic] = append(r.partitions[topic], partition_id)
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
 }

+ 23 - 19
offset_fetch_response_test.go

@@ -41,24 +41,28 @@ func TestNormalOffsetFetchResponse(t *testing.T) {
 	if response.ClientID != "za" {
 		t.Error("Decoding produced wrong client ID.")
 	}
-	if len(response.Blocks) == 2 {
-		if len(response.Blocks["m"]) != 0 {
-			t.Error("Decoding produced partitions for topic 'm' where there were none.")
-		}
-		if len(response.Blocks["t"]) == 1 {
-			if response.Blocks["t"][0].Offset != 0 {
-				t.Error("Decoding produced wrong offset for topic 't' partition 0.")
-			}
-			if response.Blocks["t"][0].Metadata != "md" {
-				t.Error("Decoding produced wrong metadata for topic 't' partition 0.")
-			}
-			if response.Blocks["t"][0].Err != REQUEST_TIMED_OUT {
-				t.Error("Decoding produced wrong error for topic 't' partition 0.")
-			}
-		} else {
-			t.Error("Decoding produced wrong number of blocks for topic 't'.")
-		}
-	} else {
-		t.Error("Decoding produced wrong number of blocks.")
+
+	if len(response.Blocks) != 2 {
+		t.Fatal("Decoding produced wrong number of blocks.")
+	}
+
+	if len(response.Blocks["m"]) != 0 {
+		t.Error("Decoding produced partitions for topic 'm' where there were none.")
+	}
+
+	if len(response.Blocks["t"]) != 1 {
+		t.Fatal("Decoding produced wrong number of blocks for topic 't'.")
+	}
+
+	if response.Blocks["t"][0].Offset != 0 {
+		t.Error("Decoding produced wrong offset for topic 't' partition 0.")
+	}
+
+	if response.Blocks["t"][0].Metadata != "md" {
+		t.Error("Decoding produced wrong metadata for topic 't' partition 0.")
+	}
+
+	if response.Blocks["t"][0].Err != RequestTimedOut {
+		t.Error("Decoding produced wrong error for topic 't' partition 0.")
 	}
 }

+ 6 - 6
offset_request.go

@@ -5,11 +5,11 @@ package sarama
 type OffsetTime int64
 
 const (
-	// Ask for the latest offsets.
-	LATEST_OFFSETS OffsetTime = -1
-	// Ask for the earliest available offset. Note that because offsets are pulled in descending order,
+	// LatestOffsets asks for the latest offsets.
+	LatestOffsets OffsetTime = -1
+	// EarliestOffset asks for the earliest available offset. Note that because offsets are pulled in descending order,
 	// asking for the earliest offset will always return you a single element.
-	EARLIEST_OFFSET OffsetTime = -2
+	EarliestOffset OffsetTime = -2
 )
 
 type offsetRequestBlock struct {
@@ -61,7 +61,7 @@ func (r *OffsetRequest) version() int16 {
 	return 0
 }
 
-func (r *OffsetRequest) AddBlock(topic string, partition_id int32, time OffsetTime, maxOffsets int32) {
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time OffsetTime, maxOffsets int32) {
 	if r.blocks == nil {
 		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
 	}
@@ -74,5 +74,5 @@ func (r *OffsetRequest) AddBlock(topic string, partition_id int32, time OffsetTi
 	tmp.time = time
 	tmp.maxOffsets = maxOffsets
 
-	r.blocks[topic][partition_id] = tmp
+	r.blocks[topic][partitionID] = tmp
 }

+ 24 - 21
offset_response_test.go

@@ -34,26 +34,29 @@ func TestNormalOffsetResponse(t *testing.T) {
 	response := OffsetResponse{}
 
 	testDecodable(t, "normal", &response, normalOffsetResponse)
-	if len(response.Blocks) == 2 {
-		if len(response.Blocks["a"]) != 0 {
-			t.Error("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
-		}
-
-		if len(response.Blocks["z"]) == 1 {
-			if response.Blocks["z"][2].Err != NO_ERROR {
-				t.Error("Decoding produced invalid error for topic z partition 2.")
-			}
-			if len(response.Blocks["z"][2].Offsets) == 2 {
-				if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
-					t.Error("Decoding produced invalid offsets for topic z partition 2.")
-				}
-			} else {
-				t.Error("Decoding produced invalid number of offsets for topic z partition 2.")
-			}
-		} else {
-			t.Error("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
-		}
-	} else {
-		t.Error("Decoding produced", len(response.Blocks), "topics where there were two.")
+
+	if len(response.Blocks) != 2 {
+		t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
+	}
+
+	if len(response.Blocks["a"]) != 0 {
+		t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
+	}
+
+	if len(response.Blocks["z"]) != 1 {
+		t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
+	}
+
+	if response.Blocks["z"][2].Err != NoError {
+		t.Fatal("Decoding produced invalid error for topic z partition 2.")
+	}
+
+	if len(response.Blocks["z"][2].Offsets) != 2 {
+		t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.")
 	}
+
+	if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
+		t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
+	}
+
 }

+ 8 - 5
prep_encoder.go

@@ -1,6 +1,9 @@
 package sarama
 
-import "math"
+import (
+	"encoding/binary"
+	"math"
+)
 
 type prepEncoder struct {
 	length int
@@ -9,19 +12,19 @@ type prepEncoder struct {
 // primitives
 
 func (pe *prepEncoder) putInt8(in int8) {
-	pe.length += 1
+	pe.length += binary.Size(in)
 }
 
 func (pe *prepEncoder) putInt16(in int16) {
-	pe.length += 2
+	pe.length += binary.Size(in)
 }
 
 func (pe *prepEncoder) putInt32(in int32) {
-	pe.length += 4
+	pe.length += binary.Size(in)
 }
 
 func (pe *prepEncoder) putInt64(in int64) {
-	pe.length += 8
+	pe.length += binary.Size(in)
 }
 
 func (pe *prepEncoder) putArrayLength(in int) error {
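
The magic numbers 1, 2, 4 and 8 are replaced with `binary.Size`, which for fixed-width values returns exactly the number of bytes `encoding/binary` writes. A quick standard-library sketch of that equivalence (not part of the diff):

```go
package sketch

import "encoding/binary"

// For fixed-width integers binary.Size reports the encoded length,
// so these evaluate to 1, 2, 4 and 8 respectively.
var (
	int8Size  = binary.Size(int8(0))
	int16Size = binary.Size(int16(0))
	int32Size = binary.Size(int32(0))
	int64Size = binary.Size(int64(0))
)
```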

+ 6 - 3
produce_request.go

@@ -5,9 +5,12 @@ package sarama
 type RequiredAcks int16
 
 const (
-	NO_RESPONSE    RequiredAcks = 0  // Don't send any response, the TCP ACK is all you get.
-	WAIT_FOR_LOCAL RequiredAcks = 1  // Wait for only the local commit to succeed before responding.
-	WAIT_FOR_ALL   RequiredAcks = -1 // Wait for all replicas to commit before responding.
+	// NoResponse doesn't send any response, the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all replicas to commit before responding.
+	WaitForAll RequiredAcks = -1
 )
 
 type ProduceRequest struct {

+ 1 - 1
produce_request_test.go

@@ -40,6 +40,6 @@ func TestProduceRequest(t *testing.T) {
 	request.Timeout = 0x444
 	testEncodable(t, "header", request, produceRequestHeader)
 
-	request.AddMessage("topic", 0xAD, &Message{Codec: COMPRESSION_NONE, Key: nil, Value: []byte{0x00, 0xEE}})
+	request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
 	testEncodable(t, "one message", request, produceRequestOneMessage)
 }

+ 2 - 2
produce_response_test.go

@@ -46,7 +46,7 @@ func TestProduceResponse(t *testing.T) {
 	if block == nil {
 		t.Error("Decoding did not produce a block for bar/1")
 	} else {
-		if block.Err != NO_ERROR {
+		if block.Err != NoError {
 			t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
 		}
 		if block.Offset != 0xFF {
@@ -57,7 +57,7 @@ func TestProduceResponse(t *testing.T) {
 	if block == nil {
 		t.Error("Decoding did not produce a block for bar/2")
 	} else {
-		if block.Err != INVALID_MESSAGE {
+		if block.Err != InvalidMessage {
 			t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err))
 		}
 		if block.Offset != 0 {

+ 2 - 2
producer.go

@@ -127,9 +127,9 @@ func (p *Producer) safeSendMessage(key, value Encoder, retry bool) error {
 	}
 
 	switch block.Err {
-	case NO_ERROR:
+	case NoError:
 		return nil
-	case UNKNOWN_TOPIC_OR_PARTITION, NOT_LEADER_FOR_PARTITION, LEADER_NOT_AVAILABLE:
+	case UnknownTopicOrPartition, NotLeaderForPartition, LeaderNotAvailable:
 		if !retry {
 			return block.Err
 		}

+ 2 - 2
producer_test.go

@@ -51,7 +51,7 @@ func TestSimpleProducer(t *testing.T) {
 	}
 	defer client.Close()
 
-	producer, err := NewProducer(client, "my_topic", &ProducerConfig{RequiredAcks: WAIT_FOR_LOCAL})
+	producer, err := NewProducer(client, "my_topic", &ProducerConfig{RequiredAcks: WaitForLocal})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -74,7 +74,7 @@ func ExampleProducer() {
 	}
 	defer client.Close()
 
-	producer, err := NewProducer(client, "my_topic", &ProducerConfig{RequiredAcks: WAIT_FOR_LOCAL})
+	producer, err := NewProducer(client, "my_topic", &ProducerConfig{RequiredAcks: WaitForLocal})
 	if err != nil {
 		panic(err)
 	}

+ 24 - 18
real_decoder.go

@@ -19,7 +19,7 @@ func (rd *realDecoder) getInt8() (int8, error) {
 		return -1, InsufficientData
 	}
 	tmp := int8(rd.raw[rd.off])
-	rd.off += 1
+	rd.off += binary.Size(tmp)
 	return tmp, nil
 }
 
@@ -29,7 +29,7 @@ func (rd *realDecoder) getInt16() (int16, error) {
 		return -1, InsufficientData
 	}
 	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
-	rd.off += 2
+	rd.off += binary.Size(tmp)
 	return tmp, nil
 }
 
@@ -39,7 +39,7 @@ func (rd *realDecoder) getInt32() (int32, error) {
 		return -1, InsufficientData
 	}
 	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
+	rd.off += binary.Size(tmp)
 	return tmp, nil
 }
 
@@ -49,7 +49,7 @@ func (rd *realDecoder) getInt64() (int64, error) {
 		return -1, InsufficientData
 	}
 	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-	rd.off += 8
+	rd.off += binary.Size(tmp)
 	return tmp, nil
 }
 
@@ -131,16 +131,19 @@ func (rd *realDecoder) getInt32Array() ([]int32, error) {
 	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
 	rd.off += 4
 
-	var ret []int32 = nil
 	if rd.remaining() < 4*n {
 		rd.off = len(rd.raw)
 		return nil, InsufficientData
-	} else if n > 0 {
-		ret = make([]int32, n)
-		for i := range ret {
-			ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-			rd.off += 4
-		}
+	}
+
+	if n <= 0 {
+		return nil, nil
+	}
+
+	ret := make([]int32, n)
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += binary.Size(ret[i])
 	}
 	return ret, nil
 }
@@ -153,16 +156,19 @@ func (rd *realDecoder) getInt64Array() ([]int64, error) {
 	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
 	rd.off += 4
 
-	var ret []int64 = nil
 	if rd.remaining() < 8*n {
 		rd.off = len(rd.raw)
 		return nil, InsufficientData
-	} else if n > 0 {
-		ret = make([]int64, n)
-		for i := range ret {
-			ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-			rd.off += 8
-		}
+	}
+
+	if n <= 0 {
+		return nil, nil
+	}
+
+	ret := make([]int64, n)
+	for i := range ret {
+		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+		rd.off += binary.Size(ret[i])
 	}
 	return ret, nil
 }

+ 4 - 4
real_encoder.go

@@ -12,22 +12,22 @@ type realEncoder struct {
 
 func (re *realEncoder) putInt8(in int8) {
 	re.raw[re.off] = byte(in)
-	re.off += 1
+	re.off += binary.Size(in)
 }
 
 func (re *realEncoder) putInt16(in int16) {
 	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
-	re.off += 2
+	re.off += binary.Size(in)
 }
 
 func (re *realEncoder) putInt32(in int32) {
 	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
-	re.off += 4
+	re.off += binary.Size(in)
 }
 
 func (re *realEncoder) putInt64(in int64) {
 	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
-	re.off += 8
+	re.off += binary.Size(in)
 }
 
 func (re *realEncoder) putArrayLength(in int) error {

+ 4 - 4
request.go

@@ -7,16 +7,16 @@ type requestEncoder interface {
 }
 
 type request struct {
-	correlation_id int32
-	id             string
-	body           requestEncoder
+	correlationID int32
+	id            string
+	body          requestEncoder
 }
 
 func (r *request) encode(pe packetEncoder) (err error) {
 	pe.push(&lengthField{})
 	pe.putInt16(r.body.key())
 	pe.putInt16(r.body.version())
-	pe.putInt32(r.correlation_id)
+	pe.putInt32(r.correlationID)
 	err = pe.putString(r.id)
 	if err != nil {
 		return err

+ 1 - 1
request_test.go

@@ -31,7 +31,7 @@ func (s *testRequestBody) encode(pe packetEncoder) error {
 }
 
 func TestRequest(t *testing.T) {
-	request := request{correlation_id: 0x1234, id: "myClient", body: new(testRequestBody)}
+	request := request{correlationID: 0x1234, id: "myClient", body: new(testRequestBody)}
 	testEncodable(t, "simple", &request, requestSimple)
 }
 

+ 3 - 3
response_header.go

@@ -3,8 +3,8 @@ package sarama
 import "math"
 
 type responseHeader struct {
-	length         int32
-	correlation_id int32
+	length        int32
+	correlationID int32
 }
 
 func (r *responseHeader) decode(pd packetDecoder) (err error) {
@@ -16,6 +16,6 @@ func (r *responseHeader) decode(pd packetDecoder) (err error) {
 		return DecodingError
 	}
 
-	r.correlation_id, err = pd.getInt32()
+	r.correlationID, err = pd.getInt32()
 	return err
 }

+ 2 - 2
response_header_test.go

@@ -15,7 +15,7 @@ func TestResponseHeader(t *testing.T) {
 	if header.length != 0xf00 {
 		t.Error("Decoding header length failed, got", header.length)
 	}
-	if header.correlation_id != 0x0abbccff {
-		t.Error("Decoding header correlation id failed, got", header.correlation_id)
+	if header.correlationID != 0x0abbccff {
+		t.Error("Decoding header correlation id failed, got", header.correlationID)
 	}
 }

+ 1 - 1
utils.go

@@ -15,7 +15,7 @@ func (slice int32Slice) Swap(i, j int) {
 	slice[i], slice[j] = slice[j], slice[i]
 }
 
-// A simple interface for any type that can be encoded as an array of bytes
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
 // in order to be sent as the key or value of a Kafka message.
 type Encoder interface {
 	Encode() ([]byte, error)
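
For completeness, a hypothetical implementation of the Encoder interface documented above; it is not part of this diff:

```go
package sketch

// stringEncoder is an illustrative Encoder: it sends a Go string as raw bytes.
type stringEncoder string

func (s stringEncoder) Encode() ([]byte, error) {
	return []byte(s), nil
}
```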