Helen Tran 11 years ago
Parent
Commit
61c709f501
70 changed files with 105 additions and 6148 deletions
  1. BIN  .DS_Store
  2. + 0 - 10  .travis.yml
  3. + 0 - 20  MIT-LICENSE
  4. + 1 - 0  _config.yml
  5. BIN  _includes/.DS_Store
  6. + 15 - 0  _includes/footer.html
  7. + 28 - 0  _includes/head.html
  8. BIN  _layouts/.DS_Store
  9. + 57 - 0  _layouts/index.html
  10. + 0 - 399  broker.go
  11. + 0 - 178  broker_test.go
  12. + 0 - 402  client.go
  13. + 0 - 114  client_test.go
  14. + 0 - 358  consumer.go
  15. + 0 - 17  consumer_metadata_request.go
  16. + 0 - 19  consumer_metadata_request_test.go
  17. + 0 - 33  consumer_metadata_response.go
  18. + 0 - 61  consumer_metadata_response_test.go
  19. + 0 - 152  consumer_test.go
  20. + 0 - 35  crc32_field.go
  21. + 0 - 56  encoder_decoder.go
  22. + 0 - 136  errors.go
  23. + 0 - 70  fetch_request.go
  24. + 0 - 34  fetch_request_test.go
  25. + 0 - 155  fetch_response.go
  26. + 0 - 84  fetch_response_test.go
  27. + 4 - 0  index.md
  28. + 0 - 29  length_field.go
  29. + 0 - 153  message.go
  30. + 0 - 93  message_set.go
  31. + 0 - 113  message_test.go
  32. + 0 - 28  metadata_request.go
  33. + 0 - 29  metadata_request_test.go
  34. + 0 - 218  metadata_response.go
  35. + 0 - 139  metadata_response_test.go
  36. + 0 - 161  mockbroker.go
  37. + 0 - 71  offset_commit_request.go
  38. + 0 - 34  offset_commit_request_test.go
  39. + 0 - 42  offset_commit_response.go
  40. + 0 - 52  offset_commit_response_test.go
  41. + 0 - 41  offset_fetch_request.go
  42. + 0 - 31  offset_fetch_request_test.go
  43. + 0 - 82  offset_fetch_response.go
  44. + 0 - 61  offset_fetch_response_test.go
  45. + 0 - 78  offset_request.go
  46. + 0 - 26  offset_request_test.go
  47. + 0 - 124  offset_response.go
  48. + 0 - 62  offset_response_test.go
  49. + 0 - 44  packet_decoder.go
  50. + 0 - 41  packet_encoder.go
  51. + 0 - 86  partitioner.go
  52. + 0 - 74  partitioner_test.go
  53. + 0 - 95  prep_encoder.go
  54. + 0 - 101  produce_message.go
  55. + 0 - 79  produce_request.go
  56. + 0 - 45  produce_request_test.go
  57. + 0 - 112  produce_response.go
  58. + 0 - 67  produce_response_test.go
  59. + 0 - 487  producer.go
  60. + 0 - 344  producer_test.go
  61. + 0 - 225  real_decoder.go
  62. + 0 - 94  real_encoder.go
  63. + 0 - 29  request.go
  64. + 0 - 55  request_test.go
  65. + 0 - 23  response_header.go
  66. + 0 - 21  response_header_test.go
  67. + 0 - 21  sarama.go
  68. + 0 - 36  snappy.go
  69. + 0 - 16  snappy_test.go
  70. + 0 - 53  utils.go

+ 0 - 10
.travis.yml

@@ -1,10 +0,0 @@
-language: go
-go:
-- 1.1
-- 1.2
-
-before_install:
-- sudo apt-get install zookeeper 2>&1
-- wget http://apache.mirror.nexicom.net/kafka/0.8.1.1/kafka_2.10-0.8.1.1.tgz -O kafka.tgz
-- mkdir -p kafka && tar xzf kafka.tgz -C kafka --strip-components 1
-- nohup bash -c "cd kafka && bin/kafka-server-start.sh config/server.properties &"

+ 0 - 20
MIT-LICENSE

@@ -1,20 +0,0 @@
-Copyright (c) 2013 Evan Huus
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 1 - 0
_config.yml

@@ -0,0 +1 @@
+markdown: kramdown

BIN
_includes/.DS_Store


+ 15 - 0
_includes/footer.html

@@ -0,0 +1,15 @@
+<footer class="footer pagewidth">
+
+  <div class="grid">
+    <div class="grid-item xlarge--one-third xlarge--text-left">
+      Built and maintained by <a href="http://www.shopify.com">Shopify Inc.</a> © 2014
+    </div>
+    <div class="grid-item xlarge--one-third">
+      Want to contribute? <a href="http://www.shopify.com/careers">Join the team</a>
+    </div>
+    <div class="grid-item xlarge--one-third xlarge--text-right">
+      <a href="https://twitter.com/Shopify" class="twitter-link"><span class="under">Follow us</span> <i class="icon-twitter"></i></a>
+    </div>
+  </div>
+
+</footer>

+ 28 - 0
_includes/head.html

@@ -0,0 +1,28 @@
+<meta charset="utf-8">
+<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+
+<title>Shopify Open Source > {{ site.github.project_title }}</title>
+
+{% if page.description %}
+  <meta name="description" content="{{ site.github.project_tagline }}">
+{% endif %}
+
+<link rel="canonical" href="http://shopify.github.io">
+
+<meta name="robots" content="index, follow">
+<meta name="st:robots" content="index, follow">
+
+<meta name="viewport" content="width=device-width, initial-scale=1">
+
+<meta property='st:title' content="Shopify Open Source > {{ site.github.project_title }}">
+
+<link rel="shortcut icon" href="//cdn.shopify.com/assets/favicon.ico" type="image/x-icon">
+
+<link href="//shopify.github.io/css/sub.css" rel="stylesheet" type="text/css">
+<!--[if lt IE 9]>
+  <link href="http://shopify.github.io/css/subie.css" rel="stylesheet" type="text/css">
+<![endif]-->
+
+<!--[if IE]>
+  <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script>
+<![endif]-->

BIN
_layouts/.DS_Store


+ 57 - 0
_layouts/index.html

@@ -0,0 +1,57 @@
+<!doctype html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="en"> <![endif]-->
+<!--[if IE 9 ]><html class="ie9 no-js"> <![endif]-->
+<!--[if (gt IE 9)|!(IE)]><!--> <html class="no-js"> <!--<![endif]-->
+  <head>
+  {% include head.html %}
+  </head>
+    <body>
+    <div class="hero">
+      <div class="hero-lines"></div>
+      <header class="hero-header">
+        <div class="pagewidth">
+          <div class="logo--ie">
+            <img src="http://shopify.github.io/images/shopify-open-source-sub.svg" alt="Shopify Open Source" class="logo">
+            <span class="breadcrumb"><a href="http://shopify.github.io">Open Source</a> > {{ site.github.project_title }}</span>
+          </div>
+          <div class="repo-lang {{ site.github.language | downcase }}">
+            {{ site.github.language }}
+          </div>
+        </div>
+      </header>
+      <div class="pagewidth">
+        <div class="hero-inner">
+          <h1 class="hero-logo">{{ site.github.project_title }}</h1>
+          <h2 class="hero-text">{{ site.github.project_tagline }}</h2>
+          <div class="cta-buttons">
+            <a href="{{ site.github.zip_url }}" class="float">Download ZIP</a>
+            <a href="{{ site.github.repository_url }}" class="float github">
+              Github Repo
+              <i class="icon-star" title="Stars"></i> <span id="starCount"></span>
+              <i class="icon-forks" title="Forks"></i> <span id="forkCount"></span>
+            </a>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <div class="documentation">
+      <div class="pagewidth">
+
+        {{ content }}
+
+      </div>
+    </div>
+    {% include footer.html %}
+
+    <script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script>
+    <script src="//shopify.github.io/javascripts/sub.js"></script>
+    <script>
+    jQuery(function($) {
+      shopifyOpenSource.init({
+        repo_name: '{{ site.github.repository_nwo }}'
+      });
+    });
+    </script>
+    </body>
+</html>

+ 0 - 399
broker.go

@@ -1,399 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"strconv"
-	"sync"
-	"time"
-)
-
-// BrokerConfig is used to pass multiple configuration options to Broker.Open.
-type BrokerConfig struct {
-	MaxOpenRequests int           // How many outstanding requests the broker is allowed to have before blocking attempts to send.
-	ReadTimeout     time.Duration // How long to wait for a response before timing out and returning an error.
-	WriteTimeout    time.Duration // How long to wait for a transmit to succeed before timing out and returning an error.
-}
-
-// NewBrokerConfig returns a new broker configuration with sane defaults.
-func NewBrokerConfig() *BrokerConfig {
-	return &BrokerConfig{
-		MaxOpenRequests: 4,
-		ReadTimeout:     1 * time.Minute,
-		WriteTimeout:    1 * time.Minute,
-	}
-}
-
-// Validates a BrokerConfig instance. This will return a
-// ConfigurationError if the specified values don't make sense.
-func (config *BrokerConfig) Validate() error {
-	if config.MaxOpenRequests < 0 {
-		return ConfigurationError("Invalid MaxOpenRequests")
-	}
-
-	if config.ReadTimeout <= 0 {
-		return ConfigurationError("Invalid ReadTimeout")
-	}
-
-	if config.WriteTimeout <= 0 {
-		return ConfigurationError("Invalid WriteTimeout")
-	}
-
-	return nil
-}
-
-// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
-type Broker struct {
-	id   int32
-	addr string
-
-	conf          *BrokerConfig
-	correlationID int32
-	conn          net.Conn
-	connErr       error
-	lock          sync.Mutex
-
-	responses chan responsePromise
-	done      chan bool
-}
-
-type responsePromise struct {
-	correlationID int32
-	packets       chan []byte
-	errors        chan error
-}
-
-// NewBroker creates and returns a Broker targeting the given host:port address.
-// This does not attempt to actually connect, you have to call Open() for that.
-func NewBroker(addr string) *Broker {
-	return &Broker{id: -1, addr: addr}
-}
-
-// Open tries to connect to the Broker. It takes the broker lock synchronously, then spawns a goroutine which
-// connects and releases the lock. This means any subsequent operations on the broker will block waiting for
-// the connection to finish. To get the effect of a fully synchronous Open call, follow it by a call to Connected().
-// The only errors Open will return directly are ConfigurationError or AlreadyConnected. If conf is nil, the result of
-// NewBrokerConfig() is used.
-func (b *Broker) Open(conf *BrokerConfig) error {
-	if conf == nil {
-		conf = NewBrokerConfig()
-	}
-
-	err := conf.Validate()
-	if err != nil {
-		return err
-	}
-
-	b.lock.Lock()
-
-	if b.conn != nil {
-		b.lock.Unlock()
-		Logger.Printf("Failed to connect to broker %s\n", b.addr)
-		Logger.Println(AlreadyConnected)
-		return AlreadyConnected
-	}
-
-	go withRecover(func() {
-		defer b.lock.Unlock()
-
-		b.conn, b.connErr = net.Dial("tcp", b.addr)
-		if b.connErr != nil {
-			Logger.Printf("Failed to connect to broker %s\n", b.addr)
-			Logger.Println(b.connErr)
-			return
-		}
-
-		b.conf = conf
-		b.done = make(chan bool)
-		b.responses = make(chan responsePromise, b.conf.MaxOpenRequests)
-
-		Logger.Printf("Connected to broker %s\n", b.addr)
-		go withRecover(b.responseReceiver)
-	})
-
-	return nil
-}
-
-// Connected returns true if the broker is connected and false otherwise. If the broker is not
-// connected but it had tried to connect, the error from that connection attempt is also returned.
-func (b *Broker) Connected() (bool, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	return b.conn != nil, b.connErr
-}
-
-func (b *Broker) Close() (err error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-	defer func() {
-		if err == nil {
-			Logger.Printf("Closed connection to broker %s\n", b.addr)
-		} else {
-			Logger.Printf("Failed to close connection to broker %s.\n", b.addr)
-			Logger.Println(err)
-		}
-	}()
-
-	if b.conn == nil {
-		return NotConnected
-	}
-
-	close(b.responses)
-	<-b.done
-
-	err = b.conn.Close()
-
-	b.conn = nil
-	b.connErr = nil
-	b.done = nil
-	b.responses = nil
-
-	return
-}
-
-// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
-func (b *Broker) ID() int32 {
-	return b.id
-}
-
-// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
-func (b *Broker) Addr() string {
-	return b.addr
-}
-
-func (b *Broker) GetMetadata(clientID string, request *MetadataRequest) (*MetadataResponse, error) {
-	response := new(MetadataResponse)
-
-	err := b.sendAndReceive(clientID, request, response)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-func (b *Broker) GetConsumerMetadata(clientID string, request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
-	response := new(ConsumerMetadataResponse)
-
-	err := b.sendAndReceive(clientID, request, response)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-func (b *Broker) GetAvailableOffsets(clientID string, request *OffsetRequest) (*OffsetResponse, error) {
-	response := new(OffsetResponse)
-
-	err := b.sendAndReceive(clientID, request, response)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-func (b *Broker) Produce(clientID string, request *ProduceRequest) (*ProduceResponse, error) {
-	var response *ProduceResponse
-	var err error
-
-	if request.RequiredAcks == NoResponse {
-		err = b.sendAndReceive(clientID, request, nil)
-	} else {
-		response = new(ProduceResponse)
-		err = b.sendAndReceive(clientID, request, response)
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-func (b *Broker) Fetch(clientID string, request *FetchRequest) (*FetchResponse, error) {
-	response := new(FetchResponse)
-
-	err := b.sendAndReceive(clientID, request, response)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-func (b *Broker) CommitOffset(clientID string, request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
-	response := new(OffsetCommitResponse)
-
-	err := b.sendAndReceive(clientID, request, response)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-func (b *Broker) FetchOffset(clientID string, request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
-	response := new(OffsetFetchResponse)
-
-	err := b.sendAndReceive(clientID, request, response)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-func (b *Broker) send(clientID string, req requestEncoder, promiseResponse bool) (*responsePromise, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	if b.conn == nil {
-		if b.connErr != nil {
-			return nil, b.connErr
-		}
-		return nil, NotConnected
-	}
-
-	fullRequest := request{b.correlationID, clientID, req}
-	buf, err := encode(&fullRequest)
-	if err != nil {
-		return nil, err
-	}
-
-	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.WriteTimeout))
-	if err != nil {
-		return nil, err
-	}
-
-	_, err = b.conn.Write(buf)
-	if err != nil {
-		return nil, err
-	}
-	b.correlationID++
-
-	if !promiseResponse {
-		return nil, nil
-	}
-
-	promise := responsePromise{fullRequest.correlationID, make(chan []byte), make(chan error)}
-	b.responses <- promise
-
-	return &promise, nil
-}
-
-func (b *Broker) sendAndReceive(clientID string, req requestEncoder, res decoder) error {
-	promise, err := b.send(clientID, req, res != nil)
-
-	if err != nil {
-		return err
-	}
-
-	if promise == nil {
-		return nil
-	}
-
-	select {
-	case buf := <-promise.packets:
-		return decode(buf, res)
-	case err = <-promise.errors:
-		return err
-	}
-}
-
-func (b *Broker) decode(pd packetDecoder) (err error) {
-	b.id, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	host, err := pd.getString()
-	if err != nil {
-		return err
-	}
-
-	port, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	b.addr = fmt.Sprint(host, ":", port)
-
-	return nil
-}
-
-func (b *Broker) encode(pe packetEncoder) (err error) {
-
-	host, portstr, err := net.SplitHostPort(b.addr)
-	if err != nil {
-		return err
-	}
-	port, err := strconv.Atoi(portstr)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt32(b.id)
-
-	err = pe.putString(host)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt32(int32(port))
-
-	return nil
-}
-
-func (b *Broker) responseReceiver() {
-	header := make([]byte, 8)
-	for response := range b.responses {
-		err := b.conn.SetReadDeadline(time.Now().Add(b.conf.ReadTimeout))
-		if err != nil {
-			response.errors <- err
-			continue
-		}
-
-		_, err = io.ReadFull(b.conn, header)
-		if err != nil {
-			response.errors <- err
-			continue
-		}
-
-		decodedHeader := responseHeader{}
-		err = decode(header, &decodedHeader)
-		if err != nil {
-			response.errors <- err
-			continue
-		}
-		if decodedHeader.correlationID != response.correlationID {
-			// TODO if decoded ID < cur ID, discard until we catch up
-			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
-			response.errors <- DecodingError{Info: "CorrelationID didn't match"}
-			continue
-		}
-
-		buf := make([]byte, decodedHeader.length-4)
-		_, err = io.ReadFull(b.conn, buf)
-		if err != nil {
-			// XXX: the above ReadFull call inherits the same ReadDeadline set at the top of this loop, so it may
-			// fail with a timeout error. If this happens, our connection is permanently toast since we will no longer
-			// be aligned correctly on the stream (we'll be reading garbage Kafka headers from the middle of data).
-			// Can we/should we fail harder in that case?
-			response.errors <- err
-			continue
-		}
-
-		response.packets <- buf
-	}
-	close(b.done)
-}
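
For reference, a minimal sketch of the connect pattern that the Open doc comment above describes: open asynchronously, then call Connected() to block until the dial attempt finishes. It assumes the package is imported from github.com/Shopify/sarama, its repository path at the time of this commit.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092")

	// Open returns immediately and dials in a background goroutine.
	if err := broker.Open(nil); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	// Connected blocks on the broker lock until the dial has finished,
	// giving the effect of a fully synchronous Open.
	connected, err := broker.Connected()
	if err != nil {
		log.Fatal(err) // the error from the background connection attempt
	}
	log.Println("connected:", connected)
}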

+ 0 - 178
broker_test.go

@@ -1,178 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"testing"
-)
-
-func ExampleBroker() error {
-	broker := NewBroker("localhost:9092")
-	err := broker.Open(nil)
-	if err != nil {
-		return err
-	}
-	defer broker.Close()
-
-	request := MetadataRequest{Topics: []string{"myTopic"}}
-	response, err := broker.GetMetadata("myClient", &request)
-	if err != nil {
-		return err
-	}
-
-	fmt.Println("There are", len(response.Topics), "topics active in the cluster.")
-
-	return nil
-}
-
-type mockEncoder struct {
-	bytes []byte
-}
-
-func (m mockEncoder) encode(pe packetEncoder) error {
-	pe.putRawBytes(m.bytes)
-	return nil
-}
-
-func TestBrokerAccessors(t *testing.T) {
-	broker := NewBroker("abc:123")
-
-	if broker.ID() != -1 {
-		t.Error("New broker didn't have an ID of -1.")
-	}
-
-	if broker.Addr() != "abc:123" {
-		t.Error("New broker didn't have the correct address")
-	}
-
-	broker.id = 34
-	if broker.ID() != 34 {
-		t.Error("Manually setting broker ID did not take effect.")
-	}
-}
-
-func TestSimpleBrokerCommunication(t *testing.T) {
-	mb := NewMockBroker(t, 0)
-	defer mb.Close()
-
-	broker := NewBroker(mb.Addr())
-	err := broker.Open(nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	for _, tt := range brokerTestTable {
-		mb.Returns(&mockEncoder{tt.response})
-	}
-	for _, tt := range brokerTestTable {
-		tt.runner(t, broker)
-	}
-
-	err = broker.Close()
-	if err != nil {
-		t.Error(err)
-	}
-}
-
-// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
-var brokerTestTable = []struct {
-	response []byte
-	runner   func(*testing.T, *Broker)
-}{
-	{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-		func(t *testing.T, broker *Broker) {
-			request := MetadataRequest{}
-			response, err := broker.GetMetadata("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response == nil {
-				t.Error("Metadata request got no response!")
-			}
-		}},
-
-	{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
-		func(t *testing.T, broker *Broker) {
-			request := ConsumerMetadataRequest{}
-			response, err := broker.GetConsumerMetadata("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response == nil {
-				t.Error("Consumer Metadata request got no response!")
-			}
-		}},
-
-	{[]byte{},
-		func(t *testing.T, broker *Broker) {
-			request := ProduceRequest{}
-			request.RequiredAcks = NoResponse
-			response, err := broker.Produce("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response != nil {
-				t.Error("Produce request with NoResponse got a response!")
-			}
-		}},
-
-	{[]byte{0x00, 0x00, 0x00, 0x00},
-		func(t *testing.T, broker *Broker) {
-			request := ProduceRequest{}
-			request.RequiredAcks = WaitForLocal
-			response, err := broker.Produce("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response == nil {
-				t.Error("Produce request without NoResponse got no response!")
-			}
-		}},
-
-	{[]byte{0x00, 0x00, 0x00, 0x00},
-		func(t *testing.T, broker *Broker) {
-			request := FetchRequest{}
-			response, err := broker.Fetch("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response == nil {
-				t.Error("Fetch request got no response!")
-			}
-		}},
-
-	{[]byte{0x00, 0x00, 0x00, 0x00},
-		func(t *testing.T, broker *Broker) {
-			request := OffsetFetchRequest{}
-			response, err := broker.FetchOffset("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response == nil {
-				t.Error("OffsetFetch request got no response!")
-			}
-		}},
-
-	{[]byte{0x00, 0x00, 0x00, 0x00},
-		func(t *testing.T, broker *Broker) {
-			request := OffsetCommitRequest{}
-			response, err := broker.CommitOffset("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response == nil {
-				t.Error("OffsetCommit request got no response!")
-			}
-		}},
-
-	{[]byte{0x00, 0x00, 0x00, 0x00},
-		func(t *testing.T, broker *Broker) {
-			request := OffsetRequest{}
-			response, err := broker.GetAvailableOffsets("clientID", &request)
-			if err != nil {
-				t.Error(err)
-			}
-			if response == nil {
-				t.Error("Offset request got no response!")
-			}
-		}},
-}

+ 0 - 402
client.go

@@ -1,402 +0,0 @@
-package sarama
-
-import (
-	"sort"
-	"sync"
-	"time"
-)
-
-// ClientConfig is used to pass multiple configuration options to NewClient.
-type ClientConfig struct {
-	MetadataRetries   int           // How many times to retry a metadata request when a partition is in the middle of leader election.
-	WaitForElection   time.Duration // How long to wait for leader election to finish between retries.
-	DefaultBrokerConf *BrokerConfig // Default configuration for broker connections created by this client.
-}
-
-// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
-// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected
-// automatically when it passes out of scope. A single client can be safely shared by
-// multiple concurrent Producers and Consumers.
-type Client struct {
-	id     string
-	config ClientConfig
-
-	// the broker addresses given to us through the constructor are not guaranteed to be returned in
-	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
-	// so we store them separately
-	extraBrokerAddrs []string
-	extraBroker      *Broker
-	deadBrokerAddrs  []string
-
-	brokers map[int32]*Broker          // maps broker ids to brokers
-	leaders map[string]map[int32]int32 // maps topics to partition ids to broker ids
-	lock    sync.RWMutex               // protects access to the maps, only one since they're always written together
-}
-
-// NewClient creates a new Client with the given client ID. It connects to one of the given broker addresses
-// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
-// be retrieved from any of the given broker addresses, the client is not created.
-func NewClient(id string, addrs []string, config *ClientConfig) (*Client, error) {
-	Logger.Println("Initializing new client")
-
-	if config == nil {
-		config = NewClientConfig()
-	}
-
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	if len(addrs) < 1 {
-		return nil, ConfigurationError("You must provide at least one broker address")
-	}
-
-	client := &Client{
-		id:               id,
-		config:           *config,
-		extraBrokerAddrs: addrs,
-		extraBroker:      NewBroker(addrs[0]),
-		brokers:          make(map[int32]*Broker),
-		leaders:          make(map[string]map[int32]int32),
-	}
-	client.extraBroker.Open(config.DefaultBrokerConf)
-
-	// do an initial fetch of all cluster metadata by specifying an empty list of topics
-	err := client.RefreshAllMetadata()
-	if err != nil {
-		client.Close() // this closes tmp, since it's still in the brokers hash
-		return nil, err
-	}
-
-	Logger.Println("Successfully initialized new client")
-
-	return client, nil
-}
-
-// Close shuts down all broker connections managed by this client. It is required to call this function before
-// a client object passes out of scope, as it will otherwise leak memory. You must close any Producers or Consumers
-// using a client before you close the client.
-func (client *Client) Close() error {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	Logger.Println("Closing Client")
-
-	for _, broker := range client.brokers {
-		myBroker := broker // NB: block-local prevents clobbering
-		go withRecover(func() { myBroker.Close() })
-	}
-	client.brokers = nil
-	client.leaders = nil
-
-	if client.extraBroker != nil {
-		go withRecover(func() { client.extraBroker.Close() })
-	}
-
-	return nil
-}
-
-// Partitions returns the sorted list of available partition IDs for the given topic.
-func (client *Client) Partitions(topic string) ([]int32, error) {
-	partitions := client.cachedPartitions(topic)
-
-	// len==0 catches when it's nil (no such topic) and the odd case when every single
-	// partition is undergoing leader election simultaneously. Callers have to be able to handle
-	// this function returning an empty slice (which is a valid return value) but catching it
-	// here the first time (note we *don't* catch it below where we return NoSuchTopic) triggers
-	// a metadata refresh as a nicety so callers can just try again and don't have to manually
-	// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
-	if len(partitions) == 0 {
-		err := client.RefreshTopicMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		partitions = client.cachedPartitions(topic)
-	}
-
-	if partitions == nil {
-		return nil, NoSuchTopic
-	}
-
-	return partitions, nil
-}
-
-// Topics returns the set of available topics as retrieved from the cluster metadata.
-func (client *Client) Topics() ([]string, error) {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	ret := make([]string, 0, len(client.leaders))
-	for topic := range client.leaders {
-		ret = append(ret, topic)
-	}
-
-	return ret, nil
-}
-
-// Leader returns the broker object that is the leader of the current topic/partition, as
-// determined by querying the cluster metadata.
-func (client *Client) Leader(topic string, partitionID int32) (*Broker, error) {
-	leader := client.cachedLeader(topic, partitionID)
-
-	if leader == nil {
-		err := client.RefreshTopicMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		leader = client.cachedLeader(topic, partitionID)
-	}
-
-	if leader == nil {
-		return nil, UnknownTopicOrPartition
-	}
-
-	return leader, nil
-}
-
-// RefreshTopicMetadata takes a list of topics and queries the cluster to refresh the
-// available metadata for those topics.
-func (client *Client) RefreshTopicMetadata(topics ...string) error {
-	return client.refreshMetadata(topics, client.config.MetadataRetries)
-}
-
-// RefreshAllMetadata queries the cluster to refresh the available metadata for all topics.
-func (client *Client) RefreshAllMetadata() error {
-	// Kafka refreshes all topics when you send it an empty array of topic names...
-	return client.refreshMetadata(make([]string, 0), client.config.MetadataRetries)
-}
-
-// misc private helper functions
-
-// XXX: see https://github.com/Shopify/sarama/issues/15
-//      and https://github.com/Shopify/sarama/issues/23
-// disconnectBroker is a bad hacky way to accomplish broker management. It should be replaced with
-// something sane and the replacement should be made part of the public Client API
-func (client *Client) disconnectBroker(broker *Broker) {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	Logger.Printf("Disconnecting Broker %d\n", broker.ID())
-
-	client.deadBrokerAddrs = append(client.deadBrokerAddrs, broker.addr)
-
-	if broker == client.extraBroker {
-		client.extraBrokerAddrs = client.extraBrokerAddrs[1:]
-		if len(client.extraBrokerAddrs) > 0 {
-			client.extraBroker = NewBroker(client.extraBrokerAddrs[0])
-			client.extraBroker.Open(client.config.DefaultBrokerConf)
-		} else {
-			client.extraBroker = nil
-		}
-	} else {
-		// we don't need to update the leaders hash, it will automatically get refreshed next time because
-		// the broker lookup will return nil
-		delete(client.brokers, broker.ID())
-	}
-
-	myBroker := broker // NB: block-local prevents clobbering
-	go withRecover(func() { myBroker.Close() })
-}
-
-func (client *Client) refreshMetadata(topics []string, retries int) error {
-	// Kafka will throw exceptions on an empty topic and not return a proper
-	// error. This handles the case by returning an error instead of sending it
-	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
-	for _, topic := range topics {
-		if len(topic) == 0 {
-			return NoSuchTopic
-		}
-	}
-
-	for broker := client.any(); broker != nil; broker = client.any() {
-		Logger.Printf("Fetching metadata from broker %s\n", broker.addr)
-		response, err := broker.GetMetadata(client.id, &MetadataRequest{Topics: topics})
-
-		switch err {
-		case nil:
-			// valid response, use it
-			retry, err := client.update(response)
-			switch {
-			case err != nil:
-				return err
-			case len(retry) == 0:
-				return nil
-			default:
-				if retries <= 0 {
-					return LeaderNotAvailable
-				}
-				Logger.Printf("Failed to fetch metadata from broker %s, waiting %dms... (%d retries remaining)\n", broker.addr, client.config.WaitForElection/time.Millisecond, retries)
-				time.Sleep(client.config.WaitForElection) // wait for leader election
-				return client.refreshMetadata(retry, retries-1)
-			}
-		case EncodingError:
-			// didn't even send, return the error
-			return err
-		}
-
-		// some other error, remove that broker and try again
-		Logger.Println("Unexpected error from GetMetadata, closing broker:", err)
-		client.disconnectBroker(broker)
-	}
-
-	if retries > 0 {
-		Logger.Printf("Out of available brokers. Resurrecting dead brokers after %dms... (%d retries remaining)\n", client.config.WaitForElection/time.Millisecond, retries)
-		time.Sleep(client.config.WaitForElection)
-		client.resurrectDeadBrokers()
-		return client.refreshMetadata(topics, retries-1)
-	} else {
-		Logger.Printf("Out of available brokers.\n")
-	}
-
-	return OutOfBrokers
-}
-
-func (client *Client) resurrectDeadBrokers() {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	brokers := make(map[string]struct{})
-	for _, addr := range client.deadBrokerAddrs {
-		brokers[addr] = struct{}{}
-	}
-	for _, addr := range client.extraBrokerAddrs {
-		brokers[addr] = struct{}{}
-	}
-
-	client.deadBrokerAddrs = []string{}
-	client.extraBrokerAddrs = []string{}
-	for addr := range brokers {
-		client.extraBrokerAddrs = append(client.extraBrokerAddrs, addr)
-	}
-
-	client.extraBroker = NewBroker(client.extraBrokerAddrs[0])
-	client.extraBroker.Open(client.config.DefaultBrokerConf)
-}
-
-func (client *Client) any() *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	for _, broker := range client.brokers {
-		return broker
-	}
-
-	return client.extraBroker
-}
-
-func (client *Client) cachedLeader(topic string, partitionID int32) *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions := client.leaders[topic]
-	if partitions != nil {
-		leader, ok := partitions[partitionID]
-		if ok {
-			return client.brokers[leader]
-		}
-	}
-
-	return nil
-}
-
-func (client *Client) cachedPartitions(topic string) []int32 {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions := client.leaders[topic]
-	if partitions == nil {
-		return nil
-	}
-
-	ret := make([]int32, 0, len(partitions))
-	for id := range partitions {
-		ret = append(ret, id)
-	}
-
-	sort.Sort(int32Slice(ret))
-	return ret
-}
-
-// if no fatal error, returns a list of topics that need retrying due to LeaderNotAvailable
-func (client *Client) update(data *MetadataResponse) ([]string, error) {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	// For all the brokers we received:
-	// - if it is a new ID, save it
-	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
-	// - otherwise ignore it, replacing our existing one would just bounce the connection
-	// We asynchronously try to open connections to the new brokers. We don't care if they
-	// fail, since maybe that broker is unreachable but doesn't have a topic we care about.
-	// If it fails and we do care, whoever tries to use it will get the connection error.
-	for _, broker := range data.Brokers {
-		if client.brokers[broker.ID()] == nil {
-			broker.Open(client.config.DefaultBrokerConf)
-			client.brokers[broker.ID()] = broker
-			Logger.Printf("Registered new broker #%d at %s", broker.ID(), broker.Addr())
-		} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
-			myBroker := client.brokers[broker.ID()] // use block-local to prevent clobbering `broker` for Gs
-			go withRecover(func() { myBroker.Close() })
-			broker.Open(client.config.DefaultBrokerConf)
-			client.brokers[broker.ID()] = broker
-			Logger.Printf("Replaced registered broker #%d with %s", broker.ID(), broker.Addr())
-		}
-	}
-
-	toRetry := make(map[string]bool)
-
-	for _, topic := range data.Topics {
-		switch topic.Err {
-		case NoError:
-			break
-		case LeaderNotAvailable:
-			toRetry[topic.Name] = true
-		default:
-			return nil, topic.Err
-		}
-		client.leaders[topic.Name] = make(map[int32]int32, len(topic.Partitions))
-		for _, partition := range topic.Partitions {
-			switch partition.Err {
-			case LeaderNotAvailable:
-				toRetry[topic.Name] = true
-				delete(client.leaders[topic.Name], partition.ID)
-			case NoError:
-				client.leaders[topic.Name][partition.ID] = partition.Leader
-			default:
-				return nil, partition.Err
-			}
-		}
-	}
-
-	ret := make([]string, 0, len(toRetry))
-	for topic := range toRetry {
-		ret = append(ret, topic)
-	}
-	return ret, nil
-}
-
-// Creates a new ClientConfig instance with sensible defaults
-func NewClientConfig() *ClientConfig {
-	return &ClientConfig{
-		MetadataRetries: 3,
-		WaitForElection: 250 * time.Millisecond,
-	}
-}
-
-// Validates a ClientConfig instance. This will return a
-// ConfigurationError if the specified values don't make sense.
-func (config *ClientConfig) Validate() error {
-	if config.MetadataRetries <= 0 {
-		return ConfigurationError("Invalid MetadataRetries. Try 10")
-	}
-
-	if config.WaitForElection <= time.Duration(0) {
-		return ConfigurationError("Invalid WaitForElection. Try 250*time.Millisecond")
-	}
-
-	if config.DefaultBrokerConf != nil {
-		if err := config.DefaultBrokerConf.Validate(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
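
For reference, a minimal sketch of the client lifecycle implied by the API removed here, assuming a broker reachable at localhost:9092 and the same import path as above. Note the empty-slice case that the Partitions doc comment warns about.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient("my_client", []string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // clients are not garbage-collected; Close is mandatory

	partitions, err := client.Partitions("my_topic")
	if err != nil {
		log.Fatal(err)
	}
	if len(partitions) == 0 {
		// a valid return value per the doc comment above; it can happen
		// while every partition is undergoing leader election
		log.Fatal("no partitions currently available for my_topic")
	}

	leader, err := client.Leader("my_topic", partitions[0])
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("broker %d at %s leads partition %d", leader.ID(), leader.Addr(), partitions[0])
}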

+ 0 - 114
client_test.go

@@ -1,114 +0,0 @@
-package sarama
-
-import (
-	"testing"
-)
-
-func TestSimpleClient(t *testing.T) {
-
-	mb := NewMockBroker(t, 1)
-
-	mb.Returns(new(MetadataResponse))
-
-	client, err := NewClient("client_id", []string{mb.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-	defer mb.Close()
-}
-
-func TestClientExtraBrokers(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mb1.Returns(mdr)
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-	defer mb1.Close()
-	defer mb2.Close()
-}
-
-func TestClientMetadata(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb5 := NewMockBroker(t, 5)
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb5.Addr(), int32(mb5.BrokerID()))
-	mdr.AddTopicPartition("my_topic", 0, int32(mb5.BrokerID()))
-	mb1.Returns(mdr)
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-	defer mb1.Close()
-	defer mb5.Close()
-
-	topics, err := client.Topics()
-	if err != nil {
-		t.Error(err)
-	} else if len(topics) != 1 || topics[0] != "my_topic" {
-		t.Error("Client returned incorrect topics:", topics)
-	}
-
-	parts, err := client.Partitions("my_topic")
-	if err != nil {
-		t.Error(err)
-	} else if len(parts) != 1 || parts[0] != 0 {
-		t.Error("Client returned incorrect partitions for my_topic:", parts)
-	}
-
-	tst, err := client.Leader("my_topic", 0)
-	if err != nil {
-		t.Error(err)
-	} else if tst.ID() != 5 {
-		t.Error("Leader for my_topic had incorrect ID.")
-	}
-}
-
-func TestClientRefreshBehaviour(t *testing.T) {
-	mb1 := NewMockBroker(t, 1)
-	mb5 := NewMockBroker(t, 5)
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb5.Addr(), int32(mb5.BrokerID()))
-	mb1.Returns(mdr)
-
-	mdr2 := new(MetadataResponse)
-	mdr2.AddTopicPartition("my_topic", 0xb, int32(mb5.BrokerID()))
-	mb5.Returns(mdr2)
-
-	client, err := NewClient("clientID", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-	defer mb1.Close()
-	defer mb5.Close()
-
-	parts, err := client.Partitions("my_topic")
-	if err != nil {
-		t.Error(err)
-	} else if len(parts) != 1 || parts[0] != 0xb {
-		t.Error("Client returned incorrect partitions for my_topic:", parts)
-	}
-
-	tst, err := client.Leader("my_topic", 0xb)
-	if err != nil {
-		t.Error(err)
-	} else if tst.ID() != 5 {
-		t.Error("Leader for my_topic had incorrect ID.")
-	}
-
-	client.disconnectBroker(tst)
-}

+ 0 - 358
consumer.go

@@ -1,358 +0,0 @@
-package sarama
-
-// OffsetMethod is passed in ConsumerConfig to tell the consumer how to determine the starting offset.
-type OffsetMethod int
-
-const (
-	// OffsetMethodManual causes the consumer to interpret the OffsetValue in the ConsumerConfig as the
-	// offset at which to start, allowing the user to manually specify their desired starting offset.
-	OffsetMethodManual OffsetMethod = iota
-	// OffsetMethodNewest causes the consumer to start at the most recent available offset, as
-	// determined by querying the broker.
-	OffsetMethodNewest
-	// OffsetMethodOldest causes the consumer to start at the oldest available offset, as
-	// determined by querying the broker.
-	OffsetMethodOldest
-)
-
-// ConsumerConfig is used to pass multiple configuration options to NewConsumer.
-type ConsumerConfig struct {
-	// The default (maximum) amount of data to fetch from the broker in each request. The default of 0 is treated as 1024 bytes.
-	DefaultFetchSize int32
-	// The minimum amount of data to fetch in a request - the broker will wait until at least this many bytes are available.
-	// The default of 0 is treated as 'at least one' to prevent the consumer from spinning when no messages are available.
-	MinFetchSize int32
-	// The maximum permissible message size - messages larger than this will return MessageTooLarge. The default of 0 is
-	// treated as no limit.
-	MaxMessageSize int32
-	// The maximum amount of time (in ms) the broker will wait for MinFetchSize bytes to become available before it
-	// returns fewer than that anyways. The default of 0 causes Kafka to return immediately, which is rarely desirable
-	// as it causes the Consumer to spin when no events are available. 100-500ms is a reasonable range for most cases.
-	MaxWaitTime int32
-
-	// The method used to determine at which offset to begin consuming messages.
-	OffsetMethod OffsetMethod
-	// Interpreted differently according to the value of OffsetMethod.
-	OffsetValue int64
-
-	// The number of events to buffer in the Events channel. Setting this can let the
-	// consumer continue fetching messages in the background while local code consumes events,
-	// greatly improving throughput.
-	EventBufferSize int
-}
-
-// ConsumerEvent is what is provided to the user when an event occurs. It is either an error (in which case Err is non-nil) or
-// a message (in which case Err is nil and Offset, Key, and Value are set). Topic and Partition are always set.
-type ConsumerEvent struct {
-	Key, Value []byte
-	Topic      string
-	Partition  int32
-	Offset     int64
-	Err        error
-}
-
-// Consumer processes Kafka messages from a given topic and partition.
-// You MUST call Close() on a consumer to avoid leaks, it will not be garbage-collected automatically when
-// it passes out of scope (this is in addition to calling Close on the underlying client, which is still necessary).
-type Consumer struct {
-	client *Client
-
-	topic     string
-	partition int32
-	group     string
-	config    ConsumerConfig
-
-	offset        int64
-	broker        *Broker
-	stopper, done chan bool
-	events        chan *ConsumerEvent
-}
-
-// NewConsumer creates a new consumer attached to the given client. It will read messages from the given topic and partition, as
-// part of the named consumer group.
-func NewConsumer(client *Client, topic string, partition int32, group string, config *ConsumerConfig) (*Consumer, error) {
-	if config == nil {
-		config = NewConsumerConfig()
-	}
-
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	if topic == "" {
-		return nil, ConfigurationError("Empty topic")
-	}
-
-	broker, err := client.Leader(topic, partition)
-	if err != nil {
-		return nil, err
-	}
-
-	c := &Consumer{
-		client:    client,
-		topic:     topic,
-		partition: partition,
-		group:     group,
-		config:    *config,
-		broker:    broker,
-		stopper:   make(chan bool),
-		done:      make(chan bool),
-		events:    make(chan *ConsumerEvent, config.EventBufferSize),
-	}
-
-	switch config.OffsetMethod {
-	case OffsetMethodManual:
-		if config.OffsetValue < 0 {
-			return nil, ConfigurationError("OffsetValue cannot be < 0 when OffsetMethod is MANUAL")
-		}
-		c.offset = config.OffsetValue
-	case OffsetMethodNewest:
-		c.offset, err = c.getOffset(LatestOffsets, true)
-		if err != nil {
-			return nil, err
-		}
-	case OffsetMethodOldest:
-		c.offset, err = c.getOffset(EarliestOffset, true)
-		if err != nil {
-			return nil, err
-		}
-	default:
-		return nil, ConfigurationError("Invalid OffsetMethod")
-	}
-
-	go withRecover(c.fetchMessages)
-
-	return c, nil
-}
-
-// Events returns the read channel for any events (messages or errors) that might be returned by the broker.
-func (c *Consumer) Events() <-chan *ConsumerEvent {
-	return c.events
-}
-
-// Close stops the consumer from fetching messages. It is required to call this function before
-// a consumer object passes out of scope, as it will otherwise leak memory. You must call this before
-// calling Close on the underlying client.
-func (c *Consumer) Close() error {
-	close(c.stopper)
-	<-c.done
-	return nil
-}
-
-// helper function for safely sending an error on the errors channel
-// if it returns true, the error was sent (or was nil)
-// if it returns false, the stopper channel signaled that your goroutine should return!
-func (c *Consumer) sendError(err error) bool {
-	if err == nil {
-		return true
-	}
-
-	select {
-	case <-c.stopper:
-		close(c.events)
-		close(c.done)
-		return false
-	case c.events <- &ConsumerEvent{Err: err, Topic: c.topic, Partition: c.partition}:
-		return true
-	}
-}
-
-func (c *Consumer) fetchMessages() {
-
-	fetchSize := c.config.DefaultFetchSize
-
-	for {
-		request := new(FetchRequest)
-		request.MinBytes = c.config.MinFetchSize
-		request.MaxWaitTime = c.config.MaxWaitTime
-		request.AddBlock(c.topic, c.partition, c.offset, fetchSize)
-
-		response, err := c.broker.Fetch(c.client.id, request)
-		switch {
-		case err == nil:
-			break
-		case err == EncodingError:
-			if c.sendError(err) {
-				continue
-			} else {
-				return
-			}
-		default:
-			Logger.Printf("Unexpected error processing FetchRequest; disconnecting broker %s: %s\n", c.broker.addr, err)
-			c.client.disconnectBroker(c.broker)
-			for c.broker, err = c.client.Leader(c.topic, c.partition); err != nil; c.broker, err = c.client.Leader(c.topic, c.partition) {
-				if !c.sendError(err) {
-					return
-				}
-			}
-			continue
-		}
-
-		block := response.GetBlock(c.topic, c.partition)
-		if block == nil {
-			if c.sendError(IncompleteResponse) {
-				continue
-			} else {
-				return
-			}
-		}
-
-		switch block.Err {
-		case NoError:
-			break
-		case UnknownTopicOrPartition, NotLeaderForPartition, LeaderNotAvailable:
-			err = c.client.RefreshTopicMetadata(c.topic)
-			if c.sendError(err) {
-				for c.broker, err = c.client.Leader(c.topic, c.partition); err != nil; c.broker, err = c.client.Leader(c.topic, c.partition) {
-					if !c.sendError(err) {
-						return
-					}
-				}
-				continue
-			} else {
-				return
-			}
-		default:
-			if c.sendError(block.Err) {
-				continue
-			} else {
-				return
-			}
-		}
-
-		if len(block.MsgSet.Messages) == 0 {
-			// We got no messages. If we got a trailing one then we need to ask for more data.
-			// Otherwise we just poll again and wait for one to be produced...
-			if block.MsgSet.PartialTrailingMessage {
-				if c.config.MaxMessageSize == 0 {
-					fetchSize *= 2
-				} else {
-					if fetchSize == c.config.MaxMessageSize {
-						if c.sendError(MessageTooLarge) {
-							continue
-						} else {
-							return
-						}
-					} else {
-						fetchSize *= 2
-						if fetchSize > c.config.MaxMessageSize {
-							fetchSize = c.config.MaxMessageSize
-						}
-					}
-				}
-			}
-			select {
-			case <-c.stopper:
-				close(c.events)
-				close(c.done)
-				return
-			default:
-				continue
-			}
-		} else {
-			fetchSize = c.config.DefaultFetchSize
-		}
-
-		for _, msgBlock := range block.MsgSet.Messages {
-			for _, msg := range msgBlock.Messages() {
-				select {
-				case <-c.stopper:
-					close(c.events)
-					close(c.done)
-					return
-				case c.events <- &ConsumerEvent{Key: msg.Msg.Key, Value: msg.Msg.Value, Offset: msg.Offset, Topic: c.topic, Partition: c.partition}:
-					c.offset++
-				}
-			}
-		}
-	}
-}
-
-func (c *Consumer) getOffset(where OffsetTime, retry bool) (int64, error) {
-	request := &OffsetRequest{}
-	request.AddBlock(c.topic, c.partition, where, 1)
-
-	response, err := c.broker.GetAvailableOffsets(c.client.id, request)
-	switch err {
-	case nil:
-		break
-	case EncodingError:
-		return -1, err
-	default:
-		if !retry {
-			return -1, err
-		}
-		Logger.Printf("Unexpected error processing OffsetRequest; disconnecting broker %s: %s\n", c.broker.addr, err)
-		c.client.disconnectBroker(c.broker)
-		c.broker, err = c.client.Leader(c.topic, c.partition)
-		if err != nil {
-			return -1, err
-		}
-		return c.getOffset(where, false)
-	}
-
-	block := response.GetBlock(c.topic, c.partition)
-	if block == nil {
-		return -1, IncompleteResponse
-	}
-
-	switch block.Err {
-	case NoError:
-		if len(block.Offsets) < 1 {
-			return -1, IncompleteResponse
-		}
-		return block.Offsets[0], nil
-	case UnknownTopicOrPartition, NotLeaderForPartition, LeaderNotAvailable:
-		if !retry {
-			return -1, block.Err
-		}
-		err = c.client.RefreshTopicMetadata(c.topic)
-		if err != nil {
-			return -1, err
-		}
-		c.broker, err = c.client.Leader(c.topic, c.partition)
-		if err != nil {
-			return -1, err
-		}
-		return c.getOffset(where, false)
-	}
-
-	return -1, block.Err
-}
-
-// Creates a ConsumerConfig instance with sane defaults.
-func NewConsumerConfig() *ConsumerConfig {
-	return &ConsumerConfig{
-		DefaultFetchSize: 1024,
-		MinFetchSize:     1,
-		MaxWaitTime:      250,
-	}
-}
-
-// Validates a ConsumerConfig instance. It will return a
-// ConfigurationError if the specified value doesn't make sense.
-func (config *ConsumerConfig) Validate() error {
-	if config.DefaultFetchSize <= 0 {
-		return ConfigurationError("Invalid DefaultFetchSize")
-	}
-
-	if config.MinFetchSize <= 0 {
-		return ConfigurationError("Invalid MinFetchSize")
-	}
-
-	if config.MaxMessageSize < 0 {
-		return ConfigurationError("Invalid MaxMessageSize")
-	}
-
-	if config.MaxWaitTime <= 0 {
-		return ConfigurationError("Invalid MaxWaitTime")
-	} else if config.MaxWaitTime < 100 {
-		Logger.Println("ConsumerConfig.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
-	}
-
-	if config.EventBufferSize < 0 {
-		return ConfigurationError("Invalid EventBufferSize")
-	}
-
-	return nil
-}
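
For reference, a minimal sketch tying the ConsumerConfig fields above together: start from the oldest available offset with a buffered event channel. The topic, group, and broker address are placeholders; the import path is assumed as in the earlier sketches.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient("my_client", []string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	config := sarama.NewConsumerConfig()
	config.OffsetMethod = sarama.OffsetMethodOldest // start from the oldest available offset
	config.EventBufferSize = 16                     // keep fetching while events are processed

	consumer, err := sarama.NewConsumer(client, "my_topic", 0, "my_group", config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close() // deferred after the client's Close, so it runs first, as required

	for i := 0; i < 10; i++ {
		event := <-consumer.Events()
		if event.Err != nil {
			log.Println("consume error:", event.Err)
			continue
		}
		log.Printf("offset %d: %s", event.Offset, event.Value)
	}
}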

+ 0 - 17
consumer_metadata_request.go

@@ -1,17 +0,0 @@
-package sarama
-
-type ConsumerMetadataRequest struct {
-	ConsumerGroup string
-}
-
-func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
-	return pe.putString(r.ConsumerGroup)
-}
-
-func (r *ConsumerMetadataRequest) key() int16 {
-	return 10
-}
-
-func (r *ConsumerMetadataRequest) version() int16 {
-	return 0
-}

+ 0 - 19
consumer_metadata_request_test.go

@@ -1,19 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	consumerMetadataRequestEmpty = []byte{
-		0x00, 0x00}
-
-	consumerMetadataRequestString = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'}
-)
-
-func TestConsumerMetadataRequest(t *testing.T) {
-	request := new(ConsumerMetadataRequest)
-	testEncodable(t, "empty string", request, consumerMetadataRequestEmpty)
-
-	request.ConsumerGroup = "foobar"
-	testEncodable(t, "with string", request, consumerMetadataRequestString)
-}

+ 0 - 33
consumer_metadata_response.go

@@ -1,33 +0,0 @@
-package sarama
-
-type ConsumerMetadataResponse struct {
-	Err             KError
-	CoordinatorId   int32
-	CoordinatorHost string
-	CoordinatorPort int32
-}
-
-func (r *ConsumerMetadataResponse) decode(pd packetDecoder) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(tmp)
-
-	r.CoordinatorId, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	r.CoordinatorHost, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	r.CoordinatorPort, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}

+ 0 - 61
consumer_metadata_response_test.go

@@ -1,61 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	consumerMetadataResponseError = []byte{
-		0x00, 0x0E,
-		0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00}
-
-	consumerMetadataResponseSuccess = []byte{
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0xAB,
-		0x00, 0x03, 'f', 'o', 'o',
-		0x00, 0x00, 0xCC, 0xDD}
-)
-
-func TestConsumerMetadataResponseError(t *testing.T) {
-	response := ConsumerMetadataResponse{}
-
-	testDecodable(t, "error", &response, consumerMetadataResponseError)
-
-	if response.Err != OffsetsLoadInProgress {
-		t.Error("Decoding produced incorrect error value.")
-	}
-
-	if response.CoordinatorId != 0 {
-		t.Error("Decoding produced incorrect ID.")
-	}
-
-	if len(response.CoordinatorHost) != 0 {
-		t.Error("Decoding produced incorrect host.")
-	}
-
-	if response.CoordinatorPort != 0 {
-		t.Error("Decoding produced incorrect port.")
-	}
-}
-
-func TestConsumerMetadataResponseSuccess(t *testing.T) {
-	response := ConsumerMetadataResponse{}
-
-	testDecodable(t, "success", &response, consumerMetadataResponseSuccess)
-
-	if response.Err != NoError {
-		t.Error("Decoding produced error value where there was none.")
-	}
-
-	if response.CoordinatorId != 0xAB {
-		t.Error("Decoding produced incorrect coordinator ID.")
-	}
-
-	if response.CoordinatorHost != "foo" {
-		t.Error("Decoding produced incorrect coordinator host.")
-	}
-
-	if response.CoordinatorPort != 0xCCDD {
-		t.Error("Decoding produced incorrect coordinator port.")
-	}
-}

+ 0 - 152
consumer_test.go

@@ -1,152 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"testing"
-	"time"
-)
-
-func TestSimpleConsumer(t *testing.T) {
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddTopicPartition("my_topic", 0, 2)
-	mb1.Returns(mdr)
-
-	for i := 0; i < 10; i++ {
-		fr := new(FetchResponse)
-		fr.AddMessage("my_topic", 0, nil, ByteEncoder([]byte{0x00, 0x0E}), int64(i))
-		mb2.Returns(fr)
-	}
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-
-	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer consumer.Close()
-	defer mb1.Close()
-	defer mb2.Close()
-
-	for i := 0; i < 10; i++ {
-		event := <-consumer.Events()
-		if event.Err != nil {
-			t.Error(err)
-		}
-		if event.Offset != int64(i) {
-			t.Error("Incorrect message offset!")
-		}
-	}
-
-}
-
-func TestConsumerRawOffset(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddTopicPartition("my_topic", 0, 2)
-	mb1.Returns(mdr)
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-
-	config := NewConsumerConfig()
-	config.OffsetMethod = OffsetMethodManual
-	config.OffsetValue = 1234
-	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", config)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer consumer.Close()
-
-	defer mb1.Close()
-	defer mb2.Close()
-
-	if consumer.offset != 1234 {
-		t.Error("Raw offset not set correctly")
-	}
-}
-
-func TestConsumerLatestOffset(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddTopicPartition("my_topic", 0, 2)
-	mb1.Returns(mdr)
-
-	or := new(OffsetResponse)
-	or.AddTopicPartition("my_topic", 0, 0x010101)
-	mb2.Returns(or)
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-
-	config := NewConsumerConfig()
-	config.OffsetMethod = OffsetMethodNewest
-	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", config)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer consumer.Close()
-
-	defer mb2.Close()
-	defer mb1.Close()
-
-	if consumer.offset != 0x010101 {
-		t.Error("Latest offset not fetched correctly")
-	}
-}
-
-func ExampleConsumer() {
-	client, err := NewClient("my_client", []string{"localhost:9092"}, nil)
-	if err != nil {
-		panic(err)
-	} else {
-		fmt.Println("> connected")
-	}
-	defer client.Close()
-
-	consumer, err := NewConsumer(client, "my_topic", 0, "my_consumer_group", NewConsumerConfig())
-	if err != nil {
-		panic(err)
-	} else {
-		fmt.Println("> consumer ready")
-	}
-	defer consumer.Close()
-
-	msgCount := 0
-consumerLoop:
-	for {
-		select {
-		case event := <-consumer.Events():
-			if event.Err != nil {
-				panic(event.Err)
-			}
-			msgCount++
-		case <-time.After(5 * time.Second):
-			fmt.Println("> timed out")
-			break consumerLoop
-		}
-	}
-	fmt.Println("Got", msgCount, "messages.")
-}

+ 0 - 35
crc32_field.go

@@ -1,35 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"hash/crc32"
-)
-
-// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
-type crc32Field struct {
-	startOffset int
-}
-
-func (c *crc32Field) saveOffset(in int) {
-	c.startOffset = in
-}
-
-func (c *crc32Field) reserveLength() int {
-	return 4
-}
-
-func (c *crc32Field) run(curOffset int, buf []byte) error {
-	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
-	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
-	return nil
-}
-
-func (c *crc32Field) check(curOffset int, buf []byte) error {
-	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
-
-	if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
-		return DecodingError{Info: "CRC didn't match"}
-	}
-
-	return nil
-}
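
crc32Field reserves 4 bytes, checksums everything written after them, and stores the CRC big-endian in the reserved space. A standalone sketch of the same framing, independent of the package above:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("a kafka message body")
	buf := make([]byte, 4+len(payload)) // reserveLength() == 4 bytes for the CRC
	copy(buf[4:], payload)

	// as in crc32Field.run: checksum the bytes after the reserved field
	// and store the result big-endian at the start
	binary.BigEndian.PutUint32(buf, crc32.ChecksumIEEE(buf[4:]))

	// as in crc32Field.check: recompute and compare
	ok := crc32.ChecksumIEEE(buf[4:]) == binary.BigEndian.Uint32(buf)
	fmt.Println("CRC matches:", ok)
}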

+ 0 - 56
encoder_decoder.go

@@ -1,56 +0,0 @@
-package sarama
-
-// Encoder is the interface that wraps the basic Encode method.
-// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
-type encoder interface {
-	encode(pe packetEncoder) error
-}
-
-// Encode takes an Encoder and turns it into bytes.
-func encode(in encoder) ([]byte, error) {
-	if in == nil {
-		return nil, nil
-	}
-
-	var prepEnc prepEncoder
-	var realEnc realEncoder
-
-	err := in.encode(&prepEnc)
-	if err != nil {
-		return nil, err
-	}
-
-	realEnc.raw = make([]byte, prepEnc.length)
-	err = in.encode(&realEnc)
-	if err != nil {
-		return nil, err
-	}
-
-	return realEnc.raw, nil
-}
-
-// Decoder is the interface that wraps the basic Decode method.
-// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
-type decoder interface {
-	decode(pd packetDecoder) error
-}
-
-// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
-// interpreted using Kafka's encoding rules.
-func decode(buf []byte, in decoder) error {
-	if buf == nil {
-		return nil
-	}
-
-	helper := realDecoder{raw: buf}
-	err := in.decode(&helper)
-	if err != nil {
-		return err
-	}
-
-	if helper.off != len(buf) {
-		return DecodingError{Info: "Length was invalid"}
-	}
-
-	return nil
-}
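
To make the two-pass scheme concrete: the prep pass (prepEncoder) walks the structure once to compute the exact buffer size, then the real pass (realEncoder) fills it. A sketch of a hypothetical package-internal type plugging into this (the type and its fields are made up):

    // heartbeat is a made-up type implementing the package's encoder interface.
    type heartbeat struct {
        id int32
    }

    func (h *heartbeat) encode(pe packetEncoder) error {
        pe.putInt32(h.id)         // pass 1 counts 4 bytes; pass 2 writes them
        return pe.putString("hi") // length-prefixed per Kafka's encoding rules
    }

    // raw, err := encode(&heartbeat{id: 7}) would then run both passes and
    // return the wire bytes.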

+ 0 - 136
errors.go

@@ -1,136 +0,0 @@
-package sarama
-
-import (
-	"errors"
-	"fmt"
-)
-
-// OutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
-// or otherwise failed to respond.
-var OutOfBrokers = errors.New("kafka: Client has run out of available brokers to talk to. Is your cluster reachable?")
-
-// NoSuchTopic is the error returned when the supplied topic is rejected by the Kafka servers.
-var NoSuchTopic = errors.New("kafka: Topic not recognized by brokers.")
-
-// IncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
-// not contain the expected information.
-var IncompleteResponse = errors.New("kafka: Response did not contain all the expected topic/partition blocks.")
-
-// InvalidPartition is the error returned when a partitioner returns an invalid partition index
-// (meaning one outside of the range [0...numPartitions-1]).
-var InvalidPartition = errors.New("kafka: Partitioner returned an invalid partition index.")
-
-// AlreadyConnected is the error returned when calling Open() on a Broker that is already connected.
-var AlreadyConnected = errors.New("kafka: broker: already connected")
-
-// NotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
-var NotConnected = errors.New("kafka: broker: not connected")
-
-// EncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
-// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
-var EncodingError = errors.New("kafka: Error while encoding packet.")
-
-// InsufficientData is returned when decoding and the packet is truncated. This can be expected
-// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
-// of the message set.
-var InsufficientData = errors.New("kafka: Insufficient data to decode packet, more bytes expected.")
-
-// DecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
-// This can be a bad CRC or length field, or any other invalid value.
-type DecodingError struct {
-	Info string
-}
-
-func (err DecodingError) Error() string {
-	return fmt.Sprintf("kafka: Error while decoding packet: %s", err.Info)
-}
-
-// MessageTooLarge is returned when the next message to consume is larger than the configured MaxFetchSize
-var MessageTooLarge = errors.New("kafka: Message is larger than MaxFetchSize")
-
-// ConfigurationError is the type of error returned from NewClient, NewProducer or NewConsumer when the specified
-// configuration is invalid.
-type ConfigurationError string
-
-func (err ConfigurationError) Error() string {
-	return "kafka: Invalid Configuration: " + string(err)
-}
-
-// DroppedMessagesError is returned from a producer when messages weren't able to be successfully delivered to a broker.
-type DroppedMessagesError struct {
-	DroppedMessages int
-	Err             error
-}
-
-func (err DroppedMessagesError) Error() string {
-	if err.Err != nil {
-		return fmt.Sprintf("kafka: Dropped %d messages: %s", err.DroppedMessages, err.Err.Error())
-	} else {
-		return fmt.Sprintf("kafka: Dropped %d messages", err.DroppedMessages)
-	}
-}
-
-// KError is the type of error that can be returned directly by the Kafka broker.
-// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
-type KError int16
-
-// Numeric error codes returned by the Kafka server.
-const (
-	NoError                         KError = 0
-	Unknown                         KError = -1
-	OffsetOutOfRange                KError = 1
-	InvalidMessage                  KError = 2
-	UnknownTopicOrPartition         KError = 3
-	InvalidMessageSize              KError = 4
-	LeaderNotAvailable              KError = 5
-	NotLeaderForPartition           KError = 6
-	RequestTimedOut                 KError = 7
-	BrokerNotAvailable              KError = 8
-	MessageSizeTooLarge             KError = 10
-	StaleControllerEpochCode        KError = 11
-	OffsetMetadataTooLarge          KError = 12
-	OffsetsLoadInProgress           KError = 14
-	ConsumerCoordinatorNotAvailable KError = 15
-	NotCoordinatorForConsumer       KError = 16
-)
-
-func (err KError) Error() string {
-	// Error messages stolen/adapted from
-	// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-	switch err {
-	case NoError:
-		return "kafka server: Not an error, why are you printing me?"
-	case Unknown:
-		return "kafka server: Unexpected (unknown?) server error."
-	case OffsetOutOfRange:
-		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
-	case InvalidMessage:
-		return "kafka server: Message contents does not match its CRC."
-	case UnknownTopicOrPartition:
-		return "kafka server: Request was for a topic or partition that does not exist on this broker."
-	case InvalidMessageSize:
-		return "kafka server: The message has a negative size."
-	case LeaderNotAvailable:
-		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
-	case NotLeaderForPartition:
-		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
-	case RequestTimedOut:
-		return "kafka server: Request exceeded the user-specified time limit in the request."
-	case BrokerNotAvailable:
-		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
-	case MessageSizeTooLarge:
-		return "kafka server: Message was too large, server rejected it to avoid allocation error."
-	case StaleControllerEpochCode:
-		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
-	case OffsetMetadataTooLarge:
-		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
-	case OffsetsLoadInProgress:
-		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
-	case ConsumerCoordinatorNotAvailable:
-		return "kafka server: Offset's topic has not yet been created."
-	case NotCoordinatorForConsumer:
-		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
-	}
-
-	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
-}
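
Because KError satisfies the error interface, callers can branch on the broker-supplied code with a type assertion. A sketch (the retry policy shown is illustrative, not part of this package):

    if kerr, ok := err.(KError); ok {
        switch kerr {
        case NoError:
            // nothing to do
        case LeaderNotAvailable, NotLeaderForPartition, RequestTimedOut:
            // likely transient: refresh metadata and retry
        default:
            return kerr // treat everything else as fatal
        }
    }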

+ 0 - 70
fetch_request.go

@@ -1,70 +0,0 @@
-package sarama
-
-type fetchRequestBlock struct {
-	fetchOffset int64
-	maxBytes    int32
-}
-
-func (f *fetchRequestBlock) encode(pe packetEncoder) error {
-	pe.putInt64(f.fetchOffset)
-	pe.putInt32(f.maxBytes)
-	return nil
-}
-
-type FetchRequest struct {
-	MaxWaitTime int32
-	MinBytes    int32
-	blocks      map[string]map[int32]*fetchRequestBlock
-}
-
-func (f *FetchRequest) encode(pe packetEncoder) (err error) {
-	pe.putInt32(-1) // replica ID is always -1 for clients
-	pe.putInt32(f.MaxWaitTime)
-	pe.putInt32(f.MinBytes)
-	err = pe.putArrayLength(len(f.blocks))
-	if err != nil {
-		return err
-	}
-	for topic, blocks := range f.blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(blocks))
-		if err != nil {
-			return err
-		}
-		for partition, block := range blocks {
-			pe.putInt32(partition)
-			err = block.encode(pe)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (f *FetchRequest) key() int16 {
-	return 1
-}
-
-func (f *FetchRequest) version() int16 {
-	return 0
-}
-
-func (f *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
-	if f.blocks == nil {
-		f.blocks = make(map[string]map[int32]*fetchRequestBlock)
-	}
-
-	if f.blocks[topic] == nil {
-		f.blocks[topic] = make(map[int32]*fetchRequestBlock)
-	}
-
-	tmp := new(fetchRequestBlock)
-	tmp.maxBytes = maxBytes
-	tmp.fetchOffset = fetchOffset
-
-	f.blocks[topic][partitionID] = tmp
-}
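
A sketch of assembling a request with this API (the topic name, offset, and sizes are placeholders):

    req := new(FetchRequest)
    req.MaxWaitTime = 250 // ms the broker may block before answering
    req.MinBytes = 1      // respond as soon as any data is available
    req.AddBlock("my_topic", 0, 1234, 32*1024) // partition 0 from offset 1234, 32 KiB cap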

+ 0 - 34
fetch_request_test.go

@@ -1,34 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	fetchRequestNoBlocks = []byte{
-		0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00}
-
-	fetchRequestWithProperties = []byte{
-		0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF,
-		0x00, 0x00, 0x00, 0x00}
-
-	fetchRequestOneBlock = []byte{
-		0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
-)
-
-func TestFetchRequest(t *testing.T) {
-	request := new(FetchRequest)
-	testEncodable(t, "no blocks", request, fetchRequestNoBlocks)
-
-	request.MaxWaitTime = 0x20
-	request.MinBytes = 0xEF
-	testEncodable(t, "with properties", request, fetchRequestWithProperties)
-
-	request.MaxWaitTime = 0
-	request.MinBytes = 0
-	request.AddBlock("topic", 0x12, 0x34, 0x56)
-	testEncodable(t, "one block", request, fetchRequestOneBlock)
-}

+ 0 - 155
fetch_response.go

@@ -1,155 +0,0 @@
-package sarama
-
-type FetchResponseBlock struct {
-	Err                 KError
-	HighWaterMarkOffset int64
-	MsgSet              MessageSet
-}
-
-func (pr *FetchResponseBlock) decode(pd packetDecoder) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	pr.Err = KError(tmp)
-
-	pr.HighWaterMarkOffset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	msgSetSize, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	msgSetDecoder, err := pd.getSubset(int(msgSetSize))
-	if err != nil {
-		return err
-	}
-	err = (&pr.MsgSet).decode(msgSetDecoder)
-
-	return err
-}
-
-type FetchResponse struct {
-	Blocks map[string]map[int32]*FetchResponseBlock
-}
-
-func (pr *FetchResponseBlock) encode(pe packetEncoder) (err error) {
-	pe.putInt16(int16(pr.Err))
-
-	pe.putInt64(pr.HighWaterMarkOffset)
-
-	pe.push(&lengthField{})
-	err = pr.MsgSet.encode(pe)
-	if err != nil {
-		return err
-	}
-	return pe.pop()
-}
-
-func (fr *FetchResponse) decode(pd packetDecoder) (err error) {
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	fr.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		fr.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(FetchResponseBlock)
-			err = block.decode(pd)
-			if err != nil {
-				return err
-			}
-			fr.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (fr *FetchResponse) encode(pe packetEncoder) (err error) {
-	err = pe.putArrayLength(len(fr.Blocks))
-	if err != nil {
-		return err
-	}
-
-	for topic, partitions := range fr.Blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-
-		for id, block := range partitions {
-			pe.putInt32(id)
-			err = block.encode(pe)
-			if err != nil {
-				return err
-			}
-		}
-
-	}
-	return nil
-}
-
-func (fr *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
-	if fr.Blocks == nil {
-		return nil
-	}
-
-	if fr.Blocks[topic] == nil {
-		return nil
-	}
-
-	return fr.Blocks[topic][partition]
-}
-
-func (fr *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
-	if fr.Blocks == nil {
-		fr.Blocks = make(map[string]map[int32]*FetchResponseBlock)
-	}
-	partitions, ok := fr.Blocks[topic]
-	if !ok {
-		partitions = make(map[int32]*FetchResponseBlock)
-		fr.Blocks[topic] = partitions
-	}
-	frb := new(FetchResponseBlock)
-	partitions[partition] = frb
-	var kb []byte
-	var vb []byte
-	if key != nil {
-		kb, _ = key.Encode()
-	}
-	if value != nil {
-		vb, _ = value.Encode()
-	}
-	var msgSet MessageSet
-	msg := &Message{Key: kb, Value: vb}
-	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
-	msgSet.Messages = append(msgSet.Messages, msgBlock)
-	frb.MsgSet = msgSet
-}
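
AddMessage exists for tests: it lets a mock broker hand back a canned message. A sketch, assuming the StringEncoder helper from the (also deleted) utils.go and a *MockBroker named mockBroker:

    fr := new(FetchResponse)
    fr.AddMessage("my_topic", 0, nil, StringEncoder("hello"), 9)
    mockBroker.Returns(fr) // the consumer should then see one message at offset 9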

+ 0 - 84
fetch_response_test.go

@@ -1,84 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"testing"
-)
-
-var (
-	emptyFetchResponse = []byte{
-		0x00, 0x00, 0x00, 0x00}
-
-	oneMessageFetchResponse = []byte{
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x05,
-		0x00, 0x01,
-		0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10,
-		0x00, 0x00, 0x00, 0x1C,
-		// messageSet
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x10,
-		// message
-		0x23, 0x96, 0x4a, 0xf7, // CRC
-		0x00,
-		0x00,
-		0xFF, 0xFF, 0xFF, 0xFF,
-		0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
-)
-
-func TestEmptyFetchResponse(t *testing.T) {
-	response := FetchResponse{}
-	testDecodable(t, "empty", &response, emptyFetchResponse)
-
-	if len(response.Blocks) != 0 {
-		t.Error("Decoding produced topic blocks where there were none.")
-	}
-
-}
-
-func TestOneMessageFetchResponse(t *testing.T) {
-	response := FetchResponse{}
-	testDecodable(t, "one message", &response, oneMessageFetchResponse)
-
-	if len(response.Blocks) != 1 {
-		t.Fatal("Decoding produced incorrect number of topic blocks.")
-	}
-
-	if len(response.Blocks["topic"]) != 1 {
-		t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
-	}
-
-	block := response.GetBlock("topic", 5)
-	if block == nil {
-		t.Fatal("GetBlock didn't return block.")
-	}
-	if block.Err != OffsetOutOfRange {
-		t.Error("Decoding didn't produce correct error code.")
-	}
-	if block.HighWaterMarkOffset != 0x10101010 {
-		t.Error("Decoding didn't produce correct high water mark offset.")
-	}
-	if block.MsgSet.PartialTrailingMessage {
-		t.Error("Decoding detected a partial trailing message where there wasn't one.")
-	}
-
-	if len(block.MsgSet.Messages) != 1 {
-		t.Fatal("Decoding produced incorrect number of messages.")
-	}
-	msgBlock := block.MsgSet.Messages[0]
-	if msgBlock.Offset != 0x550000 {
-		t.Error("Decoding produced incorrect message offset.")
-	}
-	msg := msgBlock.Msg
-	if msg.Codec != CompressionNone {
-		t.Error("Decoding produced incorrect message compression.")
-	}
-	if msg.Key != nil {
-		t.Error("Decoding produced message key where there was none.")
-	}
-	if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
-		t.Error("Decoding produced incorrect message value.")
-	}
-}

+ 4 - 0
README.md → index.md

@@ -1,3 +1,7 @@
+---
+layout: index
+---
+
 sarama
 ======
 

+ 0 - 29
length_field.go

@@ -1,29 +0,0 @@
-package sarama
-
-import "encoding/binary"
-
-// lengthField implements the pushEncoder and pushDecoder interfaces for calculating 4-byte lengths.
-type lengthField struct {
-	startOffset int
-}
-
-func (l *lengthField) saveOffset(in int) {
-	l.startOffset = in
-}
-
-func (l *lengthField) reserveLength() int {
-	return 4
-}
-
-func (l *lengthField) run(curOffset int, buf []byte) error {
-	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
-	return nil
-}
-
-func (l *lengthField) check(curOffset int, buf []byte) error {
-	if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
-		return DecodingError{Info: "Lengthfield check failed"}
-	}
-
-	return nil
-}

+ 0 - 153
message.go

@@ -1,153 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"compress/gzip"
-	"io/ioutil"
-)
-
-// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
-type CompressionCodec int8
-
-// only the last two bits are really used
-const compressionCodecMask int8 = 0x03
-
-const (
-	CompressionNone   CompressionCodec = 0
-	CompressionGZIP   CompressionCodec = 1
-	CompressionSnappy CompressionCodec = 2
-)
-
-// The spec just says: "This is a version id used to allow backwards compatible evolution of the message
-// binary format." but it doesn't say what the current value is, so presumably 0...
-const messageFormat int8 = 0
-
-type Message struct {
-	Codec CompressionCodec // codec used to compress the message contents
-	Key   []byte           // the message key, may be nil
-	Value []byte           // the message contents
-	Set   *MessageSet      // the message set a message might wrap
-
-	compressedCache []byte
-}
-
-func (m *Message) encode(pe packetEncoder) error {
-	pe.push(&crc32Field{})
-
-	pe.putInt8(messageFormat)
-
-	attributes := int8(m.Codec) & compressionCodecMask
-	pe.putInt8(attributes)
-
-	err := pe.putBytes(m.Key)
-	if err != nil {
-		return err
-	}
-
-	var payload []byte
-
-	if m.compressedCache != nil {
-		payload = m.compressedCache
-		m.compressedCache = nil
-	} else {
-		switch m.Codec {
-		case CompressionNone:
-			payload = m.Value
-		case CompressionGZIP:
-			var buf bytes.Buffer
-			writer := gzip.NewWriter(&buf)
-			if _, err = writer.Write(m.Value); err != nil {
-				return err
-			}
-			if err = writer.Close(); err != nil {
-				return err
-			}
-			m.compressedCache = buf.Bytes()
-			payload = m.compressedCache
-		case CompressionSnappy:
-			tmp, err := SnappyEncode(m.Value)
-			if err != nil {
-				return err
-			}
-			m.compressedCache = tmp
-			payload = m.compressedCache
-		default:
-			return EncodingError
-		}
-	}
-
-	err = pe.putBytes(payload)
-	if err != nil {
-		return err
-	}
-
-	return pe.pop()
-}
-
-func (m *Message) decode(pd packetDecoder) (err error) {
-	err = pd.push(&crc32Field{})
-	if err != nil {
-		return err
-	}
-
-	format, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	if format != messageFormat {
-		return DecodingError{Info: "Unexpected messageFormat"}
-	}
-
-	attribute, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	m.Codec = CompressionCodec(attribute & compressionCodecMask)
-
-	m.Key, err = pd.getBytes()
-	if err != nil {
-		return err
-	}
-
-	m.Value, err = pd.getBytes()
-	if err != nil {
-		return err
-	}
-
-	switch m.Codec {
-	case CompressionNone:
-		// nothing to do
-	case CompressionGZIP:
-		if m.Value == nil {
-			return DecodingError{Info: "GZIP compression specified, but no data to uncompress"}
-		}
-		reader, err := gzip.NewReader(bytes.NewReader(m.Value))
-		if err != nil {
-			return err
-		}
-		if m.Value, err = ioutil.ReadAll(reader); err != nil {
-			return err
-		}
-		return m.decodeSet()
-	case CompressionSnappy:
-		if m.Value == nil {
-			return DecodingError{Info: "Snappy compression specified, but no data to uncompress"}
-		}
-		if m.Value, err = SnappyDecode(m.Value); err != nil {
-			return err
-		}
-		return m.decodeSet()
-	default:
-		return DecodingError{Info: "Invalid compression specified"}
-	}
-
-	err = pd.pop()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// decodes a message set from a previously encoded bulk-message
-func (m *Message) decodeSet() (err error) {
-	pd := realDecoder{raw: m.Value}
-	m.Set = &MessageSet{}
-	return m.Set.decode(&pd)
-}
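
A round-trip sketch through the package-internal encode/decode helpers (uncompressed, so no nested message set comes into play):

    msg := &Message{Codec: CompressionNone, Value: []byte("hello")}
    packet, err := encode(msg) // CRC and length fields are pushed/popped automatically
    if err != nil {
        panic(err)
    }
    decoded := new(Message)
    if err = decode(packet, decoded); err != nil {
        panic(err)
    }
    // decoded.Value is now []byte("hello"); decoded.Set remains nil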

+ 0 - 93
message_set.go

@@ -1,93 +0,0 @@
-package sarama
-
-type MessageBlock struct {
-	Offset int64
-	Msg    *Message
-}
-
-// Messages is a convenience helper which returns either all the messages
-// wrapped in this block's nested set (when the message is compressed), or
-// the block itself as a single-element slice.
-func (msb *MessageBlock) Messages() []*MessageBlock {
-	if msb.Msg.Set != nil {
-		return msb.Msg.Set.Messages
-	}
-	return []*MessageBlock{msb}
-}
-
-func (msb *MessageBlock) encode(pe packetEncoder) error {
-	pe.putInt64(msb.Offset)
-	pe.push(&lengthField{})
-	err := msb.Msg.encode(pe)
-	if err != nil {
-		return err
-	}
-	return pe.pop()
-}
-
-func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
-	msb.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	pd.push(&lengthField{})
-	if err != nil {
-		return err
-	}
-
-	msb.Msg = new(Message)
-	err = msb.Msg.decode(pd)
-	if err != nil {
-		return err
-	}
-
-	err = pd.pop()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-type MessageSet struct {
-	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
-	Messages               []*MessageBlock
-}
-
-func (ms *MessageSet) encode(pe packetEncoder) error {
-	for i := range ms.Messages {
-		err := ms.Messages[i].encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (ms *MessageSet) decode(pd packetDecoder) (err error) {
-	ms.Messages = nil
-
-	for pd.remaining() > 0 {
-		msb := new(MessageBlock)
-		err = msb.decode(pd)
-		switch err {
-		case nil:
-			ms.Messages = append(ms.Messages, msb)
-		case InsufficientData:
-			// As an optimization the server is allowed to return a partial message at the
-			// end of the message set. Clients are expected to handle this, so we flag it and stop decoding.
-			ms.PartialTrailingMessage = true
-			return nil
-		default:
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (ms *MessageSet) addMessage(msg *Message) {
-	block := new(MessageBlock)
-	block.Msg = msg
-	ms.Messages = append(ms.Messages, block)
-}

+ 0 - 113
message_test.go

@@ -1,113 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	emptyMessage = []byte{
-		167, 236, 104, 3, // CRC
-		0x00,                   // magic version byte
-		0x00,                   // attribute flags
-		0xFF, 0xFF, 0xFF, 0xFF, // key
-		0xFF, 0xFF, 0xFF, 0xFF} // value
-
-	emptyGzipMessage = []byte{
-		97, 79, 149, 90, //CRC
-		0x00,                   // magic version byte
-		0x01,                   // attribute flags
-		0xFF, 0xFF, 0xFF, 0xFF, // key
-		// value
-		0x00, 0x00, 0x00, 0x17,
-		0x1f, 0x8b,
-		0x08,
-		0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
-
-	emptyBulkSnappyMessage = []byte{
-		180, 47, 53, 209, //CRC
-		0x00,                   // magic version byte
-		0x02,                   // attribute flags
-		0xFF, 0xFF, 0xFF, 0xFF, // key
-		0, 0, 0, 42,
-		130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic
-		0, 0, 0, 1, // min version
-		0, 0, 0, 1, // default version
-		0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0}
-
-	emptyBulkGzipMessage = []byte{
-		139, 160, 63, 141, //CRC
-		0x00,                   // magic version byte
-		0x01,                   // attribute flags
-		0xFF, 0xFF, 0xFF, 0xFF, // key
-		0x00, 0x00, 0x00, 0x27, // len
-		0x1f, 0x8b, // Gzip Magic
-		0x08, // deflate compressed
-		0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0}
-)
-
-func TestMessageEncoding(t *testing.T) {
-	message := Message{}
-	testEncodable(t, "empty", &message, emptyMessage)
-
-	message.Value = []byte{}
-	message.Codec = CompressionGZIP
-	testEncodable(t, "empty gzip", &message, emptyGzipMessage)
-}
-
-func TestMessageDecoding(t *testing.T) {
-	message := Message{}
-	testDecodable(t, "empty", &message, emptyMessage)
-	if message.Codec != CompressionNone {
-		t.Error("Decoding produced compression codec where there was none.")
-	}
-	if message.Key != nil {
-		t.Error("Decoding produced key where there was none.")
-	}
-	if message.Value != nil {
-		t.Error("Decoding produced value where there was none.")
-	}
-	if message.Set != nil {
-		t.Error("Decoding produced set where there was none.")
-	}
-
-	testDecodable(t, "empty gzip", &message, emptyGzipMessage)
-	if message.Codec != CompressionGZIP {
-		t.Error("Decoding produced incorrect compression codec (was gzip).")
-	}
-	if message.Key != nil {
-		t.Error("Decoding produced key where there was none.")
-	}
-	if message.Value == nil || len(message.Value) != 0 {
-		t.Error("Decoding produced nil or content-ful value where there was an empty array.")
-	}
-}
-
-func TestMessageDecodingBulkSnappy(t *testing.T) {
-	message := Message{}
-	testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage)
-	if message.Codec != CompressionSnappy {
-		t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy)
-	}
-	if message.Key != nil {
-		t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
-	}
-	if message.Set == nil {
-		t.Error("Decoding produced no set, but one was expected.")
-	} else if len(message.Set.Messages) != 2 {
-		t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
-	}
-}
-
-func TestMessageDecodingBulkGzip(t *testing.T) {
-	message := Message{}
-	testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage)
-	if message.Codec != CompressionGZIP {
-		t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP)
-	}
-	if message.Key != nil {
-		t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
-	}
-	if message.Set == nil {
-		t.Error("Decoding produced no set, but one was expected.")
-	} else if len(message.Set.Messages) != 2 {
-		t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
-	}
-}

+ 0 - 28
metadata_request.go

@@ -1,28 +0,0 @@
-package sarama
-
-type MetadataRequest struct {
-	Topics []string
-}
-
-func (mr *MetadataRequest) encode(pe packetEncoder) error {
-	err := pe.putArrayLength(len(mr.Topics))
-	if err != nil {
-		return err
-	}
-
-	for i := range mr.Topics {
-		err = pe.putString(mr.Topics[i])
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (mr *MetadataRequest) key() int16 {
-	return 3
-}
-
-func (mr *MetadataRequest) version() int16 {
-	return 0
-}

+ 0 - 29
metadata_request_test.go

@@ -1,29 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	metadataRequestNoTopics = []byte{
-		0x00, 0x00, 0x00, 0x00}
-
-	metadataRequestOneTopic = []byte{
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'}
-
-	metadataRequestThreeTopics = []byte{
-		0x00, 0x00, 0x00, 0x03,
-		0x00, 0x03, 'f', 'o', 'o',
-		0x00, 0x03, 'b', 'a', 'r',
-		0x00, 0x03, 'b', 'a', 'z'}
-)
-
-func TestMetadataRequest(t *testing.T) {
-	request := new(MetadataRequest)
-	testEncodable(t, "no topics", request, metadataRequestNoTopics)
-
-	request.Topics = []string{"topic1"}
-	testEncodable(t, "one topic", request, metadataRequestOneTopic)
-
-	request.Topics = []string{"foo", "bar", "baz"}
-	testEncodable(t, "three topics", request, metadataRequestThreeTopics)
-}

+ 0 - 218
metadata_response.go

@@ -1,218 +0,0 @@
-package sarama
-
-type PartitionMetadata struct {
-	Err      KError
-	ID       int32
-	Leader   int32
-	Replicas []int32
-	Isr      []int32
-}
-
-func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	pm.Err = KError(tmp)
-
-	pm.ID, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	pm.Leader, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	pm.Replicas, err = pd.getInt32Array()
-	if err != nil {
-		return err
-	}
-
-	pm.Isr, err = pd.getInt32Array()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
-	pe.putInt16(int16(pm.Err))
-	pe.putInt32(pm.ID)
-	pe.putInt32(pm.Leader)
-
-	err = pe.putInt32Array(pm.Replicas)
-	if err != nil {
-		return err
-	}
-
-	err = pe.putInt32Array(pm.Isr)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-type TopicMetadata struct {
-	Err        KError
-	Name       string
-	Partitions []*PartitionMetadata
-}
-
-func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	tm.Err = KError(tmp)
-
-	tm.Name, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	tm.Partitions = make([]*PartitionMetadata, n)
-	for i := 0; i < n; i++ {
-		tm.Partitions[i] = new(PartitionMetadata)
-		err = tm.Partitions[i].decode(pd)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
-	pe.putInt16(int16(tm.Err))
-
-	err = pe.putString(tm.Name)
-	if err != nil {
-		return err
-	}
-
-	err = pe.putArrayLength(len(tm.Partitions))
-	if err != nil {
-		return err
-	}
-
-	for _, pm := range tm.Partitions {
-		err = pm.encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-type MetadataResponse struct {
-	Brokers []*Broker
-	Topics  []*TopicMetadata
-}
-
-func (m *MetadataResponse) decode(pd packetDecoder) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	m.Brokers = make([]*Broker, n)
-	for i := 0; i < n; i++ {
-		m.Brokers[i] = new(Broker)
-		err = m.Brokers[i].decode(pd)
-		if err != nil {
-			return err
-		}
-	}
-
-	n, err = pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	m.Topics = make([]*TopicMetadata, n)
-	for i := 0; i < n; i++ {
-		m.Topics[i] = new(TopicMetadata)
-		err = m.Topics[i].decode(pd)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *MetadataResponse) encode(pe packetEncoder) error {
-	err := pe.putArrayLength(len(m.Brokers))
-	if err != nil {
-		return err
-	}
-	for _, broker := range m.Brokers {
-		err = broker.encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-
-	err = pe.putArrayLength(len(m.Topics))
-	if err != nil {
-		return err
-	}
-	for _, tm := range m.Topics {
-		err = tm.encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// testing API
-
-func (m *MetadataResponse) AddBroker(addr string, id int32) {
-	m.Brokers = append(m.Brokers, &Broker{id: id, addr: addr})
-}
-
-func (m *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32) {
-	var match *TopicMetadata
-
-	for _, tm := range m.Topics {
-		if tm.Name == topic {
-			match = tm
-			goto foundTopic
-		}
-	}
-
-	match = new(TopicMetadata)
-	match.Name = topic
-	m.Topics = append(m.Topics, match)
-
-foundTopic:
-
-	var pmatch *PartitionMetadata
-
-	for _, pm := range match.Partitions {
-		if pm.ID == partition {
-			pmatch = pm
-			goto foundPartition
-		}
-	}
-
-	pmatch = new(PartitionMetadata)
-	pmatch.ID = partition
-	match.Partitions = append(match.Partitions, pmatch)
-
-foundPartition:
-
-	pmatch.Leader = brokerID
-
-}
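
These testing helpers are used exactly as in the deleted consumer tests above: declare a broker, point a topic/partition's leader at its ID, and queue the response on a mock:

    mdr := new(MetadataResponse)
    mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
    mdr.AddTopicPartition("my_topic", 0, 2) // leader is the broker with ID 2
    mb1.Returns(mdr)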

+ 0 - 139
metadata_response_test.go

@@ -1,139 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	emptyMetadataResponse = []byte{
-		0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00}
-
-	brokersNoTopicsMetadataResponse = []byte{
-		0x00, 0x00, 0x00, 0x02,
-
-		0x00, 0x00, 0xab, 0xff,
-		0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
-		0x00, 0x00, 0x00, 0x33,
-
-		0x00, 0x01, 0x02, 0x03,
-		0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
-		0x00, 0x00, 0x01, 0x11,
-
-		0x00, 0x00, 0x00, 0x00}
-
-	topicsNoBrokersMetadataResponse = []byte{
-		0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x02,
-
-		0x00, 0x00,
-		0x00, 0x03, 'f', 'o', 'o',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x04,
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x07,
-		0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
-		0x00, 0x00, 0x00, 0x00,
-
-		0x00, 0x00,
-		0x00, 0x03, 'b', 'a', 'r',
-		0x00, 0x00, 0x00, 0x00}
-)
-
-func TestEmptyMetadataResponse(t *testing.T) {
-	response := MetadataResponse{}
-
-	testDecodable(t, "empty", &response, emptyMetadataResponse)
-	if len(response.Brokers) != 0 {
-		t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
-	}
-	if len(response.Topics) != 0 {
-		t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
-	}
-}
-
-func TestMetadataResponseWithBrokers(t *testing.T) {
-	response := MetadataResponse{}
-
-	testDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse)
-	if len(response.Brokers) != 2 {
-		t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
-	}
-
-	if response.Brokers[0].id != 0xabff {
-		t.Error("Decoding produced invalid broker 0 id.")
-	}
-	if response.Brokers[0].addr != "localhost:51" {
-		t.Error("Decoding produced invalid broker 0 address.")
-	}
-	if response.Brokers[1].id != 0x010203 {
-		t.Error("Decoding produced invalid broker 1 id.")
-	}
-	if response.Brokers[1].addr != "google.com:273" {
-		t.Error("Decoding produced invalid broker 1 address.")
-	}
-
-	if len(response.Topics) != 0 {
-		t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
-	}
-}
-
-func TestMetadataResponseWithTopics(t *testing.T) {
-	response := MetadataResponse{}
-
-	testDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse)
-	if len(response.Brokers) != 0 {
-		t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
-	}
-
-	if len(response.Topics) != 2 {
-		t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!")
-	}
-
-	if response.Topics[0].Err != NoError {
-		t.Error("Decoding produced invalid topic 0 error.")
-	}
-
-	if response.Topics[0].Name != "foo" {
-		t.Error("Decoding produced invalid topic 0 name.")
-	}
-
-	if len(response.Topics[0].Partitions) != 1 {
-		t.Fatal("Decoding produced invalid partition count for topic 0.")
-	}
-
-	if response.Topics[0].Partitions[0].Err != InvalidMessageSize {
-		t.Error("Decoding produced invalid topic 0 partition 0 error.")
-	}
-
-	if response.Topics[0].Partitions[0].ID != 0x01 {
-		t.Error("Decoding produced invalid topic 0 partition 0 id.")
-	}
-
-	if response.Topics[0].Partitions[0].Leader != 0x07 {
-		t.Error("Decoding produced invalid topic 0 partition 0 leader.")
-	}
-
-	if len(response.Topics[0].Partitions[0].Replicas) != 3 {
-		t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.")
-	}
-	for i := 0; i < 3; i++ {
-		if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
-			t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
-		}
-	}
-
-	if len(response.Topics[0].Partitions[0].Isr) != 0 {
-		t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
-	}
-
-	if response.Topics[1].Err != NoError {
-		t.Error("Decoding produced invalid topic 1 error.")
-	}
-
-	if response.Topics[1].Name != "bar" {
-		t.Error("Decoding produced invalid topic 0 name.")
-	}
-
-	if len(response.Topics[1].Partitions) != 0 {
-		t.Error("Decoding produced invalid partition count for topic 1.")
-	}
-}

+ 0 - 161
mockbroker.go

@@ -1,161 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-	"net"
-	"strconv"
-)
-
-// TestState is a generic interface for a test state, implemented e.g. by testing.T
-type TestState interface {
-	Error(args ...interface{})
-	Fatal(args ...interface{})
-	Fatalf(format string, args ...interface{})
-}
-
-// MockBroker is a mock Kafka broker. It consists of a TCP server on a kernel-selected localhost port that
-// accepts a single connection. It reads Kafka requests from that connection and returns each response
-// from the channel provided at creation-time (if a response encodes to a length of 0, nothing is
-// sent and the server simply reads the next request).
-//
-// When running tests with one of these, it is strongly recommended to specify a timeout to `go test` so that if the broker hangs
-// waiting for a response, the test panics.
-//
-// It is not necessary to prefix message length or correlation ID to your response bytes; the server does that
-// automatically as a convenience.
-type MockBroker struct {
-	brokerID     int
-	port         int32
-	stopper      chan bool
-	expectations chan encoder
-	listener     net.Listener
-	t            TestState
-	expecting    encoder
-}
-
-func (b *MockBroker) BrokerID() int {
-	return b.brokerID
-}
-
-func (b *MockBroker) Port() int32 {
-	return b.port
-}
-
-func (b *MockBroker) Addr() string {
-	return b.listener.Addr().String()
-}
-
-type rawExpectation []byte
-
-func (r rawExpectation) ResponseBytes() []byte {
-	return r
-}
-
-func (b *MockBroker) Close() {
-	if b.expecting != nil {
-		b.t.Fatalf("Not all expectations were satisfied in mockBroker with ID=%d! Still waiting on %#v", b.BrokerID(), b.expecting)
-	}
-	close(b.expectations)
-	<-b.stopper
-}
-
-func (b *MockBroker) serverLoop() (ok bool) {
-	var (
-		err  error
-		conn net.Conn
-	)
-
-	defer close(b.stopper)
-	if conn, err = b.listener.Accept(); err != nil {
-		return b.serverError(err, conn)
-	}
-	reqHeader := make([]byte, 4)
-	resHeader := make([]byte, 8)
-	for expectation := range b.expectations {
-		b.expecting = expectation
-		_, err = io.ReadFull(conn, reqHeader)
-		b.expecting = nil
-		if err != nil {
-			return b.serverError(err, conn)
-		}
-		body := make([]byte, binary.BigEndian.Uint32(reqHeader))
-		if len(body) < 10 {
-			return b.serverError(errors.New("Kafka request too short."), conn)
-		}
-		if _, err = io.ReadFull(conn, body); err != nil {
-			return b.serverError(err, conn)
-		}
-
-		response, err := encode(expectation)
-		if err != nil {
-			return false
-		}
-		if len(response) == 0 {
-			continue
-		}
-
-		binary.BigEndian.PutUint32(resHeader, uint32(len(response)+4))
-		binary.BigEndian.PutUint32(resHeader[4:], binary.BigEndian.Uint32(body[4:]))
-		if _, err = conn.Write(resHeader); err != nil {
-			return b.serverError(err, conn)
-		}
-		if _, err = conn.Write(response); err != nil {
-			return b.serverError(err, conn)
-		}
-	}
-	if err = conn.Close(); err != nil {
-		return b.serverError(err, nil)
-	}
-	if err = b.listener.Close(); err != nil {
-		b.t.Error(err)
-		return false
-	}
-	return true
-}
-
-func (b *MockBroker) serverError(err error, conn net.Conn) bool {
-	b.t.Error(err)
-	if conn != nil {
-		conn.Close()
-	}
-	b.listener.Close()
-	return false
-}
-
-// NewMockBroker launches a fake Kafka broker. It takes a TestState (e.g. *testing.T) as provided by the
-// test framework and a channel of responses to use.  If an error occurs it is
-// simply logged to the TestState and the broker exits.
-func NewMockBroker(t TestState, brokerID int) *MockBroker {
-	var err error
-
-	broker := &MockBroker{
-		stopper:      make(chan bool),
-		t:            t,
-		brokerID:     brokerID,
-		expectations: make(chan encoder, 512),
-	}
-
-	broker.listener, err = net.Listen("tcp", "localhost:0")
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmp, err := strconv.ParseInt(portStr, 10, 32)
-	if err != nil {
-		t.Fatal(err)
-	}
-	broker.port = int32(tmp)
-
-	go broker.serverLoop()
-
-	return broker
-}
-
-func (b *MockBroker) Returns(e encoder) {
-	b.expectations <- e
-}
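
The typical lifecycle in a test, mirroring the consumer tests at the top of this commit: create the broker, queue expectations with Returns, exercise the client, then Close (which fails the test if a queued expectation was never consumed):

    func TestSomething(t *testing.T) {
        mb := NewMockBroker(t, 1)
        defer mb.Close() // fatals if an expectation is still pending

        mdr := new(MetadataResponse)
        mdr.AddBroker(mb.Addr(), int32(mb.BrokerID()))
        mb.Returns(mdr)

        // ... run client code against mb.Addr() ...
    }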

+ 0 - 71
offset_commit_request.go

@@ -1,71 +0,0 @@
-package sarama
-
-// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
-// tells the broker to set the timestamp to the time at which the request was received.
-const ReceiveTime int64 = -1
-
-type offsetCommitRequestBlock struct {
-	offset    int64
-	timestamp int64
-	metadata  string
-}
-
-func (r *offsetCommitRequestBlock) encode(pe packetEncoder) error {
-	pe.putInt64(r.offset)
-	pe.putInt64(r.timestamp)
-	return pe.putString(r.metadata)
-}
-
-type OffsetCommitRequest struct {
-	ConsumerGroup string
-	blocks        map[string]map[int32]*offsetCommitRequestBlock
-}
-
-func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
-	err := pe.putString(r.ConsumerGroup)
-	if err != nil {
-		return err
-	}
-	err = pe.putArrayLength(len(r.blocks))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range r.blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			err = block.encode(pe)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitRequest) key() int16 {
-	return 8
-}
-
-func (r *OffsetCommitRequest) version() int16 {
-	return 0
-}
-
-func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
-	}
-
-	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
-}

+ 0 - 34
offset_commit_request_test.go

@@ -1,34 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	offsetCommitRequestNoGroupNoBlocks = []byte{
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetCommitRequestNoBlocks = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetCommitRequestOneBlock = []byte{
-		0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x52, 0x21,
-		0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
-)
-
-func TestOffsetCommitRequest(t *testing.T) {
-	request := new(OffsetCommitRequest)
-	testEncodable(t, "no group, no blocks", request, offsetCommitRequestNoGroupNoBlocks)
-
-	request.ConsumerGroup = "foobar"
-	testEncodable(t, "no blocks", request, offsetCommitRequestNoBlocks)
-
-	request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
-	testEncodable(t, "one block", request, offsetCommitRequestOneBlock)
-}

+ 0 - 42
offset_commit_response.go

@@ -1,42 +0,0 @@
-package sarama
-
-type OffsetCommitResponse struct {
-	Errors map[string]map[int32]KError
-}
-
-func (r *OffsetCommitResponse) decode(pd packetDecoder) (err error) {
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Errors = make(map[string]map[int32]KError, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numErrors, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Errors[name] = make(map[int32]KError, numErrors)
-
-		for j := 0; j < numErrors; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			tmp, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-			r.Errors[name][id] = KError(tmp)
-		}
-	}
-
-	return nil
-}

+ 0 - 52
offset_commit_response_test.go

@@ -1,52 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	emptyOffsetCommitResponse = []byte{
-		0x00, 0x00, 0x00, 0x00}
-
-	normalOffsetCommitResponse = []byte{
-		0x00, 0x00, 0x00, 0x02,
-
-		0x00, 0x01, 'm',
-		0x00, 0x00, 0x00, 0x00,
-
-		0x00, 0x01, 't',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x00,
-		0x00, 0x06}
-)
-
-func TestEmptyOffsetCommitResponse(t *testing.T) {
-	response := OffsetCommitResponse{}
-
-	testDecodable(t, "empty", &response, emptyOffsetCommitResponse)
-
-	if len(response.Errors) != 0 {
-		t.Error("Decoding produced errors where there were none.")
-	}
-}
-
-func TestNormalOffsetCommitResponse(t *testing.T) {
-	response := OffsetCommitResponse{}
-
-	testDecodable(t, "normal", &response, normalOffsetCommitResponse)
-
-	if len(response.Errors) != 2 {
-		t.Fatal("Decoding produced wrong number of errors.")
-	}
-
-	if len(response.Errors["m"]) != 0 {
-		t.Error("Decoding produced errors for topic 'm' where there were none.")
-	}
-
-	if len(response.Errors["t"]) != 1 {
-		t.Fatal("Decoding produced wrong number of errors for topic 't'.")
-	}
-
-	if response.Errors["t"][0] != NotLeaderForPartition {
-		t.Error("Decoding produced wrong error for topic 't' partition 0.")
-	}
-
-}

+ 0 - 41
offset_fetch_request.go

@@ -1,41 +0,0 @@
-package sarama
-
-type OffsetFetchRequest struct {
-	ConsumerGroup string
-	partitions    map[string][]int32
-}
-
-func (r *OffsetFetchRequest) encode(pe packetEncoder) error {
-	err := pe.putString(r.ConsumerGroup)
-	if err != nil {
-		return err
-	}
-	err = pe.putArrayLength(len(r.partitions))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range r.partitions {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putInt32Array(partitions)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (r *OffsetFetchRequest) key() int16 {
-	return 9
-}
-
-func (r *OffsetFetchRequest) version() int16 {
-	return 0
-}
-
-func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
-	if r.partitions == nil {
-		r.partitions = make(map[string][]int32)
-	}
-
-	r.partitions[topic] = append(r.partitions[topic], partitionID)
-}

+ 0 - 31
offset_fetch_request_test.go

@@ -1,31 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	offsetFetchRequestNoGroupNoPartitions = []byte{
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetFetchRequestNoPartitions = []byte{
-		0x00, 0x04, 'b', 'l', 'a', 'h',
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetFetchRequestOnePartition = []byte{
-		0x00, 0x04, 'b', 'l', 'a', 'h',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't',
-		0x00, 0x00, 0x00, 0x01,
-		0x4F, 0x4F, 0x4F, 0x4F}
-)
-
-func TestOffsetFetchRequest(t *testing.T) {
-	request := new(OffsetFetchRequest)
-	testEncodable(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions)
-
-	request.ConsumerGroup = "blah"
-	testEncodable(t, "no partitions", request, offsetFetchRequestNoPartitions)
-
-	request.AddPartition("topicTheFirst", 0x4F4F4F4F)
-	testEncodable(t, "one partition", request, offsetFetchRequestOnePartition)
-}

+ 0 - 82
offset_fetch_response.go

@@ -1,82 +0,0 @@
-package sarama
-
-type OffsetFetchResponseBlock struct {
-	Offset   int64
-	Metadata string
-	Err      KError
-}
-
-func (r *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
-	r.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	r.Metadata, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(tmp)
-
-	return nil
-}
-
-func (r *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
-	pe.putInt64(r.Offset)
-
-	err = pe.putString(r.Metadata)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt16(int16(r.Err))
-
-	return nil
-}
-
-type OffsetFetchResponse struct {
-	Blocks map[string]map[int32]*OffsetFetchResponseBlock
-}
-
-func (r *OffsetFetchResponse) decode(pd packetDecoder) (err error) {
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(OffsetFetchResponseBlock)
-			err = block.decode(pd)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}

+ 0 - 61
offset_fetch_response_test.go

@@ -1,61 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	emptyOffsetFetchResponse = []byte{
-		0x00, 0x00, 0x00, 0x00}
-
-	normalOffsetFetchResponse = []byte{
-		0x00, 0x00, 0x00, 0x02,
-
-		0x00, 0x01, 'm',
-		0x00, 0x00, 0x00, 0x00,
-
-		0x00, 0x01, 't',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x02, 'm', 'd',
-		0x00, 0x07}
-)
-
-func TestEmptyOffsetFetchResponse(t *testing.T) {
-	response := OffsetFetchResponse{}
-
-	testDecodable(t, "empty", &response, emptyOffsetFetchResponse)
-
-	if len(response.Blocks) != 0 {
-		t.Error("Decoding produced topic blocks where there were none.")
-	}
-}
-
-func TestNormalOffsetFetchResponse(t *testing.T) {
-	response := OffsetFetchResponse{}
-
-	testDecodable(t, "normal", &response, normalOffsetFetchResponse)
-
-	if len(response.Blocks) != 2 {
-		t.Fatal("Decoding produced wrong number of blocks.")
-	}
-
-	if len(response.Blocks["m"]) != 0 {
-		t.Error("Decoding produced partitions for topic 'm' where there were none.")
-	}
-
-	if len(response.Blocks["t"]) != 1 {
-		t.Fatal("Decoding produced wrong number of blocks for topic 't'.")
-	}
-
-	if response.Blocks["t"][0].Offset != 0 {
-		t.Error("Decoding produced wrong offset for topic 't' partition 0.")
-	}
-
-	if response.Blocks["t"][0].Metadata != "md" {
-		t.Error("Decoding produced wrong metadata for topic 't' partition 0.")
-	}
-
-	if response.Blocks["t"][0].Err != RequestTimedOut {
-		t.Error("Decoding produced wrong error for topic 't' partition 0.")
-	}
-}

+ 0 - 78
offset_request.go

@@ -1,78 +0,0 @@
-package sarama
-
-// OffsetTime is used in Offset Requests to ask for all messages before a certain time. Any positive int64
-// value will be interpreted as milliseconds, or use the special constants defined here.
-type OffsetTime int64
-
-const (
-	// LatestOffsets asks for the latest offsets.
-	LatestOffsets OffsetTime = -1
-	// EarliestOffset asks for the earliest available offset. Note that because offsets are pulled in descending order,
-	// asking for the earliest offset will always return a single element.
-	EarliestOffset OffsetTime = -2
-	EarliestOffset OffsetTime = -2
-)
-
-type offsetRequestBlock struct {
-	time       OffsetTime
-	maxOffsets int32
-}
-
-func (r *offsetRequestBlock) encode(pe packetEncoder) error {
-	pe.putInt64(int64(r.time))
-	pe.putInt32(r.maxOffsets)
-	return nil
-}
-
-type OffsetRequest struct {
-	blocks map[string]map[int32]*offsetRequestBlock
-}
-
-func (r *OffsetRequest) encode(pe packetEncoder) error {
-	pe.putInt32(-1) // replica ID is always -1 for clients
-	err := pe.putArrayLength(len(r.blocks))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range r.blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			err = block.encode(pe)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetRequest) key() int16 {
-	return 2
-}
-
-func (r *OffsetRequest) version() int16 {
-	return 0
-}
-
-func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time OffsetTime, maxOffsets int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
-	}
-
-	tmp := new(offsetRequestBlock)
-	tmp.time = time
-	tmp.maxOffsets = maxOffsets
-
-	r.blocks[topic][partitionID] = tmp
-}

+ 0 - 26
offset_request_test.go

@@ -1,26 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	offsetRequestNoBlocks = []byte{
-		0xFF, 0xFF, 0xFF, 0xFF,
-		0x00, 0x00, 0x00, 0x00}
-
-	offsetRequestOneBlock = []byte{
-		0xFF, 0xFF, 0xFF, 0xFF,
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x03, 'f', 'o', 'o',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x04,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x02}
-)
-
-func TestOffsetRequest(t *testing.T) {
-	request := new(OffsetRequest)
-	testEncodable(t, "no blocks", request, offsetRequestNoBlocks)
-
-	request.AddBlock("foo", 4, 1, 2)
-	testEncodable(t, "one block", request, offsetRequestOneBlock)
-}

+ 0 - 124
offset_response.go

@@ -1,124 +0,0 @@
-package sarama
-
-type OffsetResponseBlock struct {
-	Err     KError
-	Offsets []int64
-}
-
-func (r *OffsetResponseBlock) decode(pd packetDecoder) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(tmp)
-
-	r.Offsets, err = pd.getInt64Array()
-
-	return err
-}
-
-func (r *OffsetResponseBlock) encode(pe packetEncoder) (err error) {
-	pe.putInt16(int16(r.Err))
-
-	return pe.putInt64Array(r.Offsets)
-}
-
-type OffsetResponse struct {
-	Blocks map[string]map[int32]*OffsetResponseBlock
-}
-
-func (r *OffsetResponse) decode(pd packetDecoder) (err error) {
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(OffsetResponseBlock)
-			err = block.decode(pd)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
-	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.Blocks {
-		if err = pe.putString(topic); err != nil {
-			return err
-		}
-		if err = pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err = block.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-// testing API
-
-func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
-	}
-	byTopic, ok := r.Blocks[topic]
-	if !ok {
-		byTopic = make(map[int32]*OffsetResponseBlock)
-		r.Blocks[topic] = byTopic
-	}
-	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}}
-}

+ 0 - 62
offset_response_test.go

@@ -1,62 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	emptyOffsetResponse = []byte{
-		0x00, 0x00, 0x00, 0x00}
-
-	normalOffsetResponse = []byte{
-		0x00, 0x00, 0x00, 0x02,
-
-		0x00, 0x01, 'a',
-		0x00, 0x00, 0x00, 0x00,
-
-		0x00, 0x01, 'z',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0x02,
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0x02,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
-)
-
-func TestEmptyOffsetResponse(t *testing.T) {
-	response := OffsetResponse{}
-
-	testDecodable(t, "empty", &response, emptyOffsetResponse)
-	if len(response.Blocks) != 0 {
-		t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
-	}
-}
-
-func TestNormalOffsetResponse(t *testing.T) {
-	response := OffsetResponse{}
-
-	testDecodable(t, "normal", &response, normalOffsetResponse)
-
-	if len(response.Blocks) != 2 {
-		t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
-	}
-
-	if len(response.Blocks["a"]) != 0 {
-		t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
-	}
-
-	if len(response.Blocks["z"]) != 1 {
-		t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
-	}
-
-	if response.Blocks["z"][2].Err != NoError {
-		t.Fatal("Decoding produced invalid error for topic z partition 2.")
-	}
-
-	if len(response.Blocks["z"][2].Offsets) != 2 {
-		t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.")
-	}
-
-	if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
-		t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
-	}
-
-}

+ 0 - 44
packet_decoder.go

@@ -1,44 +0,0 @@
-package sarama
-
-// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
-// Types implementing Decoder only need to worry about calling methods like GetString,
-// not about how a string is represented in Kafka.
-type packetDecoder interface {
-	// Primitives
-	getInt8() (int8, error)
-	getInt16() (int16, error)
-	getInt32() (int32, error)
-	getInt64() (int64, error)
-	getArrayLength() (int, error)
-
-	// Collections
-	getBytes() ([]byte, error)
-	getString() (string, error)
-	getInt32Array() ([]int32, error)
-	getInt64Array() ([]int64, error)
-
-	// Subsets
-	remaining() int
-	getSubset(length int) (packetDecoder, error)
-
-	// Stacks, see PushDecoder
-	push(in pushDecoder) error
-	pop() error
-}
-
-// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
-// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
-// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
-// depend upon have been decoded.
-type pushDecoder interface {
-	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
-	saveOffset(in int)
-
-	// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
-	reserveLength() int
-
-	// Indicates that all required data is now available to calculate and check the field.
-	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
-	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
-	check(curOffset int, buf []byte) error
-}

+ 0 - 41
packet_encoder.go

@@ -1,41 +0,0 @@
-package sarama
-
-// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
-// Types implementing Encoder only need to worry about calling methods like PutString,
-// not about how a string is represented in Kafka.
-type packetEncoder interface {
-	// Primitives
-	putInt8(in int8)
-	putInt16(in int16)
-	putInt32(in int32)
-	putInt64(in int64)
-	putArrayLength(in int) error
-
-	// Collections
-	putBytes(in []byte) error
-	putRawBytes(in []byte) error
-	putString(in string) error
-	putInt32Array(in []int32) error
-	putInt64Array(in []int64) error
-
-	// Stacks, see PushEncoder
-	push(in pushEncoder)
-	pop() error
-}
-
-// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
-// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
-// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
-// depend upon have been written.
-type pushEncoder interface {
-	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
-	saveOffset(in int)
-
-	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
-	reserveLength() int
-
-	// Indicates that all required data is now available to calculate and write the field.
-	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
-	// of data to the saved offset, based on the data between the saved offset and curOffset.
-	run(curOffset int, buf []byte) error
-}

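These push interfaces are typically implemented together by a single field type; the deleted crc32_field.go did exactly that for the message checksum. A minimal sketch of the pattern (illustrative, not the original file):

import (
	"encoding/binary"
	"hash/crc32"
)

// crc32Field reserves 4 bytes for a big-endian CRC32 of everything that
// follows it, and satisfies both pushEncoder and pushDecoder.
type crc32Field struct {
	startOffset int
}

func (c *crc32Field) saveOffset(in int)  { c.startOffset = in }
func (c *crc32Field) reserveLength() int { return 4 }

// Encode side: checksum the bytes written after the reserved field.
func (c *crc32Field) run(curOffset int, buf []byte) error {
	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
	return nil
}

// Decode side: recompute and compare against the stored value.
func (c *crc32Field) check(curOffset int, buf []byte) error {
	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
	if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
		return DecodingError{Info: "CRC didn't match"}
	}
	return nil
}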
+ 0 - 86
partitioner.go

@@ -1,86 +0,0 @@
-package sarama
-
-import (
-	"hash"
-	"hash/fnv"
-	"math/rand"
-	"sync"
-	"time"
-)
-
-// Partitioner is anything that, given a Kafka message key and a number of partitions indexed [0...numPartitions-1],
-// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
-// as simple default implementations.
-type Partitioner interface {
-	Partition(key Encoder, numPartitions int32) int32
-}
-
-// RandomPartitioner implements the Partitioner interface by choosing a random partition each time.
-type RandomPartitioner struct {
-	generator *rand.Rand
-	m         sync.Mutex
-}
-
-func NewRandomPartitioner() *RandomPartitioner {
-	p := new(RandomPartitioner)
-	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
-	return p
-}
-
-func (p *RandomPartitioner) Partition(key Encoder, numPartitions int32) int32 {
-	p.m.Lock()
-	defer p.m.Unlock()
-	return int32(p.generator.Intn(int(numPartitions)))
-}
-
-// RoundRobinPartitioner implements the Partitioner interface by walking through the available partitions one at a time.
-type RoundRobinPartitioner struct {
-	partition int32
-	m         sync.Mutex
-}
-
-func (p *RoundRobinPartitioner) Partition(key Encoder, numPartitions int32) int32 {
-	p.m.Lock()
-	defer p.m.Unlock()
-	if p.partition >= numPartitions {
-		p.partition = 0
-	}
-	ret := p.partition
-	p.partition++
-	return ret
-}
-
-// HashPartitioner implements the Partitioner interface. If the key is nil, or fails to encode, then a random partition
-// is chosen. Otherwise the FNV-1a hash of the encoded bytes is taken modulo the number of partitions. This ensures that messages
-// with the same key always end up on the same partition.
-type HashPartitioner struct {
-	random *RandomPartitioner
-	hasher hash.Hash32
-	m      sync.Mutex
-}
-
-func NewHashPartitioner() *HashPartitioner {
-	p := new(HashPartitioner)
-	p.random = NewRandomPartitioner()
-	p.hasher = fnv.New32a()
-	return p
-}
-
-func (p *HashPartitioner) Partition(key Encoder, numPartitions int32) int32 {
-	p.m.Lock()
-	defer p.m.Unlock()
-	if key == nil {
-		return p.random.Partition(key, numPartitions)
-	}
-	bytes, err := key.Encode()
-	if err != nil {
-		return p.random.Partition(key, numPartitions)
-	}
-	p.hasher.Reset()
-	p.hasher.Write(bytes)
-	hash := int32(p.hasher.Sum32())
-	if hash < 0 {
-		hash = -hash
-	}
-	return hash % numPartitions
-}

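Any type with this single method can be assigned to ProducerConfig.Partitioner. As a usage sketch, a hypothetical partitioner (not part of the deleted code) that pins all traffic to one partition:

// constPartitioner always chooses the same partition, falling back to
// partition 0 when the pinned one does not exist for the topic.
type constPartitioner struct {
	chosen int32
}

func (p *constPartitioner) Partition(key Encoder, numPartitions int32) int32 {
	if p.chosen < 0 || p.chosen >= numPartitions {
		return 0
	}
	return p.chosen
}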
+ 0 - 74
partitioner_test.go

@@ -1,74 +0,0 @@
-package sarama
-
-import (
-	"crypto/rand"
-	"testing"
-)
-
-func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, key Encoder, numPartitions int32) {
-	choice := partitioner.Partition(key, numPartitions)
-	if choice < 0 || choice >= numPartitions {
-		t.Error(partitioner, "returned partition", choice, "outside of range for", key)
-	}
-	for i := 1; i < 50; i++ {
-		newChoice := partitioner.Partition(key, numPartitions)
-		if newChoice != choice {
-			t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".")
-		}
-	}
-}
-
-func TestRandomPartitioner(t *testing.T) {
-	partitioner := NewRandomPartitioner()
-
-	choice := partitioner.Partition(nil, 1)
-	if choice != 0 {
-		t.Error("Returned non-zero partition when only one available.")
-	}
-
-	for i := 1; i < 50; i++ {
-		choice := partitioner.Partition(nil, 50)
-		if choice < 0 || choice >= 50 {
-			t.Error("Returned partition", choice, "outside of range.")
-		}
-	}
-}
-
-func TestRoundRobinPartitioner(t *testing.T) {
-	partitioner := RoundRobinPartitioner{}
-
-	choice := partitioner.Partition(nil, 1)
-	if choice != 0 {
-		t.Error("Returned non-zero partition when only one available.")
-	}
-
-	var i int32
-	for i = 1; i < 50; i++ {
-		choice := partitioner.Partition(nil, 7)
-		if choice != i%7 {
-			t.Error("Returned partition", choice, "expecting", i%7)
-		}
-	}
-}
-
-func TestHashPartitioner(t *testing.T) {
-	partitioner := NewHashPartitioner()
-
-	choice := partitioner.Partition(nil, 1)
-	if choice != 0 {
-		t.Error("Returned non-zero partition when only one available.")
-	}
-
-	for i := 1; i < 50; i++ {
-		choice := partitioner.Partition(nil, 50)
-		if choice < 0 || choice >= 50 {
-			t.Error("Returned partition", choice, "outside of range for nil key.")
-		}
-	}
-
-	buf := make([]byte, 256)
-	for i := 1; i < 50; i++ {
-		rand.Read(buf)
-		assertPartitioningConsistent(t, partitioner, ByteEncoder(buf), 50)
-	}
-}

+ 0 - 95
prep_encoder.go

@@ -1,95 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"math"
-)
-
-type prepEncoder struct {
-	length int
-}
-
-// primitives
-
-func (pe *prepEncoder) putInt8(in int8) {
-	pe.length += binary.Size(in)
-}
-
-func (pe *prepEncoder) putInt16(in int16) {
-	pe.length += binary.Size(in)
-}
-
-func (pe *prepEncoder) putInt32(in int32) {
-	pe.length += binary.Size(in)
-}
-
-func (pe *prepEncoder) putInt64(in int64) {
-	pe.length += binary.Size(in)
-}
-
-func (pe *prepEncoder) putArrayLength(in int) error {
-	if in > math.MaxInt32 {
-		return EncodingError
-	}
-	pe.length += 4
-	return nil
-}
-
-// arrays
-
-func (pe *prepEncoder) putBytes(in []byte) error {
-	pe.length += 4
-	if in == nil {
-		return nil
-	}
-	if len(in) > math.MaxInt32 {
-		return EncodingError
-	}
-	pe.length += len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putRawBytes(in []byte) error {
-	if len(in) > math.MaxInt32 {
-		return EncodingError
-	}
-	pe.length += len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putString(in string) error {
-	pe.length += 2
-	if len(in) > math.MaxInt16 {
-		return EncodingError
-	}
-	pe.length += len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putInt32Array(in []int32) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	pe.length += 4 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putInt64Array(in []int64) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	pe.length += 8 * len(in)
-	return nil
-}
-
-// stackable
-
-func (pe *prepEncoder) push(in pushEncoder) {
-	pe.length += in.reserveLength()
-}
-
-func (pe *prepEncoder) pop() error {
-	return nil
-}

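prepEncoder is the measuring half of a two-pass scheme: a value is first run through a prepEncoder to learn its exact wire size, then through a realEncoder (shown further down in this diff) into a buffer of that size. A sketch of the driver, assuming the one-method encoder interface that the deleted encoder_decoder.go declared:

func encodeTwoPass(in encoder) ([]byte, error) {
	// Pass 1: measure. prepEncoder only accumulates lengths, so this also
	// surfaces size-related EncodingErrors before any allocation happens.
	prep := new(prepEncoder)
	if err := in.encode(prep); err != nil {
		return nil, err
	}

	// Pass 2: write into an exactly-sized buffer.
	re := &realEncoder{raw: make([]byte, prep.length)}
	if err := in.encode(re); err != nil {
		return nil, err
	}
	return re.raw, nil
}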
+ 0 - 101
produce_message.go

@@ -1,101 +0,0 @@
-package sarama
-
-import "log"
-
-type produceMessage struct {
-	tp         topicPartition
-	key, value []byte
-	retried    bool
-	sync       bool
-}
-
-type produceRequestBuilder []*produceMessage
-
-// If the message is synchronous, we manually send it and wait for a return.
-// Otherwise, we just hand it back to the producer to enqueue using the normal
-// method.
-func (msg *produceMessage) enqueue(p *Producer) error {
-	if !msg.sync {
-		return p.addMessage(msg)
-	}
-
-	var prb produceRequestBuilder = []*produceMessage{msg}
-	bp, err := p.brokerProducerFor(msg.tp)
-	if err != nil {
-		return err
-	}
-	errs := make(chan error, 1)
-	bp.flushRequest(p, prb, func(err error) {
-		errs <- err
-	})
-	return <-errs
-}
-
-func (msg *produceMessage) reenqueue(p *Producer) error {
-	if !msg.retried {
-		msg.retried = true
-		return msg.enqueue(p)
-	}
-	return nil
-}
-
-func (msg *produceMessage) hasTopicPartition(topic string, partition int32) bool {
-	return msg.tp.partition == partition && msg.tp.topic == topic
-}
-
-func (b produceRequestBuilder) toRequest(config *ProducerConfig) *ProduceRequest {
-	req := &ProduceRequest{RequiredAcks: config.RequiredAcks, Timeout: config.Timeout}
-
-	// If compression is enabled, we need to group messages by topic-partition and
-	// wrap them in MessageSets. We already discarded that grouping, so we
-	// inefficiently re-sort them. This could be optimized (e.g. by passing a map
-	// around rather than an array); not sure what the best way is.
-	if config.Compression != CompressionNone {
-		msgSets := make(map[topicPartition]*MessageSet)
-		for _, pmsg := range b {
-			msgSet, ok := msgSets[pmsg.tp]
-			if !ok {
-				msgSet = new(MessageSet)
-				msgSets[pmsg.tp] = msgSet
-			}
-
-			msgSet.addMessage(&Message{Codec: CompressionNone, Key: pmsg.key, Value: pmsg.value})
-		}
-		for tp, msgSet := range msgSets {
-			valBytes, err := encode(msgSet)
-			if err != nil {
-				log.Fatal(err) // if this happens, it's basically our fault.
-			}
-			msg := Message{Codec: config.Compression, Key: nil, Value: valBytes}
-			req.AddMessage(tp.topic, tp.partition, &msg)
-		}
-		return req
-	}
-
-	// Compression is not enabled. Naively append each message directly to the
-	// request, with no MessageSet wrapper.
-	for _, pmsg := range b {
-		msg := Message{Codec: config.Compression, Key: pmsg.key, Value: pmsg.value}
-		req.AddMessage(pmsg.tp.topic, pmsg.tp.partition, &msg)
-	}
-	return req
-}
-
-func (msg *produceMessage) byteSize() uint32 {
-	return uint32(len(msg.key) + len(msg.value))
-}
-
-func (b produceRequestBuilder) byteSize() uint32 {
-	var size uint32
-	for _, m := range b {
-		size += m.byteSize()
-	}
-	return size
-}
-
-func (b produceRequestBuilder) reverseEach(fn func(m *produceMessage)) {
-	for i := len(b) - 1; i >= 0; i-- {
-		fn(b[i])
-	}
-}

+ 0 - 79
produce_request.go

@@ -1,79 +0,0 @@
-package sarama
-
-// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
-// it must see before responding. Any positive int16 value is valid, or the constants defined here.
-type RequiredAcks int16
-
-const (
-	// NoResponse doesn't send any response, the TCP ACK is all you get.
-	NoResponse RequiredAcks = 0
-	// WaitForLocal waits for only the local commit to succeed before responding.
-	WaitForLocal RequiredAcks = 1
-	// WaitForAll waits for all replicas to commit before responding.
-	WaitForAll RequiredAcks = -1
-)
-
-type ProduceRequest struct {
-	RequiredAcks RequiredAcks
-	Timeout      int32
-	msgSets      map[string]map[int32]*MessageSet
-}
-
-func (p *ProduceRequest) encode(pe packetEncoder) error {
-	pe.putInt16(int16(p.RequiredAcks))
-	pe.putInt32(p.Timeout)
-	err := pe.putArrayLength(len(p.msgSets))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range p.msgSets {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for id, msgSet := range partitions {
-			pe.putInt32(id)
-			pe.push(&lengthField{})
-			err = msgSet.encode(pe)
-			if err != nil {
-				return err
-			}
-			err = pe.pop()
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (p *ProduceRequest) key() int16 {
-	return 0
-}
-
-func (p *ProduceRequest) version() int16 {
-	return 0
-}
-
-func (p *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
-	if p.msgSets == nil {
-		p.msgSets = make(map[string]map[int32]*MessageSet)
-	}
-
-	if p.msgSets[topic] == nil {
-		p.msgSets[topic] = make(map[int32]*MessageSet)
-	}
-
-	set := p.msgSets[topic][partition]
-
-	if set == nil {
-		set = new(MessageSet)
-		p.msgSets[topic][partition] = set
-	}
-
-	set.addMessage(msg)
-}

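A usage sketch assembling a one-message request against the API above (the topic name and payload are illustrative):

func buildExampleRequest() *ProduceRequest {
	req := &ProduceRequest{RequiredAcks: WaitForLocal, Timeout: 500}
	req.AddMessage("my_topic", 0, &Message{
		Codec: CompressionNone,
		Key:   nil,
		Value: []byte("hello"),
	})
	// key() = 0 and version() = 0 mark this as a v0 Produce request when
	// the request wrapper frames it for the wire.
	return req
}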
+ 0 - 45
produce_request_test.go

@@ -1,45 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	produceRequestEmpty = []byte{
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00}
-
-	produceRequestHeader = []byte{
-		0x01, 0x23,
-		0x00, 0x00, 0x04, 0x44,
-		0x00, 0x00, 0x00, 0x00}
-
-	produceRequestOneMessage = []byte{
-		0x01, 0x23,
-		0x00, 0x00, 0x04, 0x44,
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x05, 't', 'o', 'p', 'i', 'c',
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00, 0x00, 0xAD,
-		0x00, 0x00, 0x00, 0x1C,
-		// messageSet
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		0x00, 0x00, 0x00, 0x10,
-		// message
-		0x23, 0x96, 0x4a, 0xf7, // CRC
-		0x00,
-		0x00,
-		0xFF, 0xFF, 0xFF, 0xFF,
-		0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
-)
-
-func TestProduceRequest(t *testing.T) {
-	request := new(ProduceRequest)
-	testEncodable(t, "empty", request, produceRequestEmpty)
-
-	request.RequiredAcks = 0x123
-	request.Timeout = 0x444
-	testEncodable(t, "header", request, produceRequestHeader)
-
-	request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
-	testEncodable(t, "one message", request, produceRequestOneMessage)
-}

+ 0 - 112
produce_response.go

@@ -1,112 +0,0 @@
-package sarama
-
-type ProduceResponseBlock struct {
-	Err    KError
-	Offset int64
-}
-
-func (pr *ProduceResponseBlock) decode(pd packetDecoder) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	pr.Err = KError(tmp)
-
-	pr.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-type ProduceResponse struct {
-	Blocks map[string]map[int32]*ProduceResponseBlock
-}
-
-func (pr *ProduceResponse) decode(pd packetDecoder) (err error) {
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		pr.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(ProduceResponseBlock)
-			err = block.decode(pd)
-			if err != nil {
-				return err
-			}
-			pr.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (pr *ProduceResponse) encode(pe packetEncoder) error {
-	err := pe.putArrayLength(len(pr.Blocks))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range pr.Blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for id, prb := range partitions {
-			pe.putInt32(id)
-			pe.putInt16(int16(prb.Err))
-			pe.putInt64(prb.Offset)
-		}
-	}
-	return nil
-}
-
-func (pr *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
-	if pr.Blocks == nil {
-		return nil
-	}
-
-	if pr.Blocks[topic] == nil {
-		return nil
-	}
-
-	return pr.Blocks[topic][partition]
-}
-
-// Testing API
-
-func (pr *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
-	if pr.Blocks == nil {
-		pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
-	}
-	byTopic, ok := pr.Blocks[topic]
-	if !ok {
-		byTopic = make(map[int32]*ProduceResponseBlock)
-		pr.Blocks[topic] = byTopic
-	}
-	byTopic[partition] = &ProduceResponseBlock{Err: err}
-}

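The nil-safe lookup matters to callers: a response that does not cover the requested topic-partition yields a nil block, which must be checked before touching Err. A sketch, assuming KError satisfies the error interface as the deleted errors.go arranged:

func partitionError(response *ProduceResponse) error {
	block := response.GetBlock("my_topic", 0)
	if block == nil {
		return IncompleteResponse // response did not cover this topic-partition
	}
	if block.Err != NoError {
		return block.Err // per-partition delivery error from the broker
	}
	return nil
}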
+ 0 - 67
produce_response_test.go

@@ -1,67 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	produceResponseNoBlocks = []byte{
-		0x00, 0x00, 0x00, 0x00}
-
-	produceResponseManyBlocks = []byte{
-		0x00, 0x00, 0x00, 0x02,
-
-		0x00, 0x03, 'f', 'o', 'o',
-		0x00, 0x00, 0x00, 0x00,
-
-		0x00, 0x03, 'b', 'a', 'r',
-		0x00, 0x00, 0x00, 0x02,
-
-		0x00, 0x00, 0x00, 0x01,
-		0x00, 0x00,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
-
-		0x00, 0x00, 0x00, 0x02,
-		0x00, 0x02,
-		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-)
-
-func TestProduceResponse(t *testing.T) {
-	response := ProduceResponse{}
-
-	testDecodable(t, "no blocks", &response, produceResponseNoBlocks)
-	if len(response.Blocks) != 0 {
-		t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
-	}
-
-	testDecodable(t, "many blocks", &response, produceResponseManyBlocks)
-	if len(response.Blocks) != 2 {
-		t.Error("Decoding produced", len(response.Blocks), "topics where there were 2")
-	}
-	if len(response.Blocks["foo"]) != 0 {
-		t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none")
-	}
-	if len(response.Blocks["bar"]) != 2 {
-		t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two")
-	}
-	block := response.GetBlock("bar", 1)
-	if block == nil {
-		t.Error("Decoding did not produce a block for bar/1")
-	} else {
-		if block.Err != NoError {
-			t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
-		}
-		if block.Offset != 0xFF {
-			t.Error("Decoding failed for bar/1/Offset, got:", block.Offset)
-		}
-	}
-	block = response.GetBlock("bar", 2)
-	if block == nil {
-		t.Error("Decoding did not produce a block for bar/2")
-	} else {
-		if block.Err != InvalidMessage {
-			t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err))
-		}
-		if block.Offset != 0 {
-			t.Error("Decoding failed for bar/2/Offset, got:", block.Offset)
-		}
-	}
-}

+ 0 - 487
producer.go

@@ -1,487 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"sync"
-	"time"
-)
-
-// ProducerConfig is used to pass multiple configuration options to NewProducer.
-//
-// If MaxBufferTime=MaxBufferedBytes=0, messages will be delivered immediately and
-// constantly, but if multiple messages are received while a roundtrip to kafka
-// is in progress, they will all be combined into the next request. In this
-// mode, errors are not returned from SendMessage, but over the Errors()
-// channel.
-//
-// With MaxBufferTime and/or MaxBufferedBytes set to values > 0, sarama will
-// buffer messages before sending, to reduce traffic.
-type ProducerConfig struct {
-	Partitioner      Partitioner      // Chooses the partition to send messages to, or randomly if this is nil.
-	RequiredAcks     RequiredAcks     // The level of acknowledgement reliability needed from the broker (defaults to no acknowledgement).
-	Timeout          int32            // The maximum time in ms the broker will wait for receipt of the number of RequiredAcks.
-	Compression      CompressionCodec // The type of compression to use on messages (defaults to no compression).
-	MaxBufferedBytes uint32           // The maximum number of bytes to buffer per-broker before sending to Kafka.
-	MaxBufferTime    uint32           // The maximum number of milliseconds to buffer messages before sending to a broker.
-}
-
-// Producer publishes Kafka messages. It routes messages to the correct broker
-// for the provided topic-partition, refreshing metadata as appropriate, and
-// parses responses for errors. You must call Close() on a producer to avoid
-// leaks: it may not be garbage-collected automatically when it passes out of
-// scope (this is in addition to calling Close on the underlying client, which
-// is still necessary).
-//
-// The default values for MaxBufferedBytes and MaxBufferTime cause sarama to
-// deliver messages immediately, but to buffer subsequent messages while a
-// previous request is in-flight. This is often the correct behaviour.
-//
-// If synchronous operation is desired, you can use SendMessage. This will cause
-// sarama to block until the broker has returned a value. Normally, you will
-// want to use QueueMessage instead, and read the error back from the Errors()
-// channel. Note that when using QueueMessage, you *must* read the values from
-// the Errors() channel, or sarama will block indefinitely after a few requests.
-type Producer struct {
-	client          *Client
-	config          ProducerConfig
-	brokerProducers map[*Broker]*brokerProducer
-	m               sync.RWMutex
-	errors          chan error
-	deliveryLocks   map[topicPartition]chan bool
-	dm              sync.RWMutex
-}
-
-type brokerProducer struct {
-	mapM          sync.Mutex
-	messages      map[topicPartition][]*produceMessage
-	bufferedBytes uint32
-	flushNow      chan bool
-	broker        *Broker
-	stopper       chan bool
-	done          chan bool
-	hasMessages   chan bool
-}
-
-type topicPartition struct {
-	topic     string
-	partition int32
-}
-
-// NewProducer creates a new Producer using the given client.
-func NewProducer(client *Client, config *ProducerConfig) (*Producer, error) {
-	if config == nil {
-		config = NewProducerConfig()
-	}
-
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	return &Producer{
-		client:          client,
-		config:          *config,
-		errors:          make(chan error, 16),
-		deliveryLocks:   make(map[topicPartition]chan bool),
-		brokerProducers: make(map[*Broker]*brokerProducer),
-	}, nil
-}
-
-// Errors, when operating in asynchronous mode, provides access to errors
-// generated while parsing ProduceResponses from kafka. It should never be
-// called in synchronous mode.
-func (p *Producer) Errors() chan error {
-	return p.errors
-}
-
-// Close shuts down the producer and flushes any messages it may have buffered.
-// You must call this function before a producer object passes out of scope, as
-// it may otherwise leak memory. You must call this before calling Close on the
-// underlying client.
-func (p *Producer) Close() error {
-	for _, bp := range p.brokerProducers {
-		bp.Close()
-	}
-	return nil
-}
-
-// QueueMessage sends a message with the given key and value to the given topic.
-// The partition to send to is selected by the Producer's Partitioner. To send
-// strings as either key or value, see the StringEncoder type.
-//
-// QueueMessage uses buffering semantics to reduce the number of requests to the
-// broker. The buffer logic is tunable with config.MaxBufferedBytes and
-// config.MaxBufferTime.
-//
-// QueueMessage will return an error if it's unable to construct the message
-// (unlikely), but network and response errors must be read from Errors(), since
-// QueueMessage uses asynchronous delivery. Note that you MUST read back from
-// Errors(), otherwise the producer will stall after some number of errors.
-//
-// If you care about message ordering, you should not mix calls to QueueMessage
-// and SendMessage on the same Producer; either one, used alone, preserves
-// ordering.
-func (p *Producer) QueueMessage(topic string, key, value Encoder) error {
-	return p.genericSendMessage(topic, key, value, false)
-}
-
-// SendMessage sends a message with the given key and value to the given topic.
-// The partition to send to is selected by the Producer's Partitioner. To send
-// strings as either key or value, see the StringEncoder type.
-//
-// Unlike QueueMessage, SendMessage operates synchronously, and will block until
-// the response is received from the broker, returning any error generated in
-// the process. Reading from Errors() may interfere with the operation of
-// SendMessage().
-//
-// If you care about message ordering, you should not call QueueMessage and
-// SendMessage on the same Producer.
-func (p *Producer) SendMessage(topic string, key, value Encoder) (err error) {
-	return p.genericSendMessage(topic, key, value, true)
-}
-
-func (p *Producer) genericSendMessage(topic string, key, value Encoder, synchronous bool) (err error) {
-	var keyBytes, valBytes []byte
-
-	if key != nil {
-		if keyBytes, err = key.Encode(); err != nil {
-			return err
-		}
-	}
-	if value != nil {
-		if valBytes, err = value.Encode(); err != nil {
-			return err
-		}
-	}
-
-	partition, err := p.choosePartition(topic, key)
-	if err != nil {
-		return err
-	}
-
-	// produce_message.go
-	msg := &produceMessage{
-		tp:    topicPartition{topic, partition},
-		key:   keyBytes,
-		value: valBytes,
-		sync:  synchronous,
-	}
-
-	// produce_message.go
-	return msg.enqueue(p)
-}
-
-func (p *Producer) addMessage(msg *produceMessage) error {
-	bp, err := p.brokerProducerFor(msg.tp)
-	if err != nil {
-		return err
-	}
-	bp.addMessage(msg, p.config.MaxBufferedBytes)
-	return nil
-}
-
-func (p *Producer) brokerProducerFor(tp topicPartition) (*brokerProducer, error) {
-	broker, err := p.client.Leader(tp.topic, tp.partition)
-	if err != nil {
-		return nil, err
-	}
-
-	p.m.RLock()
-	bp, ok := p.brokerProducers[broker]
-	p.m.RUnlock()
-	if !ok {
-		p.m.Lock()
-		bp, ok = p.brokerProducers[broker]
-		if !ok {
-			bp = p.newBrokerProducer(broker)
-			p.brokerProducers[broker] = bp
-		}
-		p.m.Unlock()
-	}
-
-	return bp, nil
-}
-
-func (p *Producer) newBrokerProducer(broker *Broker) *brokerProducer {
-	bp := &brokerProducer{
-		messages:    make(map[topicPartition][]*produceMessage),
-		flushNow:    make(chan bool, 1),
-		broker:      broker,
-		stopper:     make(chan bool),
-		done:        make(chan bool),
-		hasMessages: make(chan bool, 1),
-	}
-
-	maxBufferTime := time.Duration(p.config.MaxBufferTime) * time.Millisecond
-
-	var wg sync.WaitGroup
-	wg.Add(1)
-
-	go func() {
-		timer := time.NewTimer(maxBufferTime)
-		var shutdownRequired bool
-		wg.Done()
-		for {
-			select {
-			case <-bp.flushNow:
-				if shutdownRequired = bp.flush(p); shutdownRequired {
-					goto shutdown
-				}
-			case <-timer.C:
-				if shutdownRequired = bp.flushIfAnyMessages(p); shutdownRequired {
-					goto shutdown
-				}
-			case <-bp.stopper:
-				goto shutdown
-			}
-			timer.Reset(maxBufferTime)
-		}
-	shutdown:
-		delete(p.brokerProducers, bp.broker)
-		bp.flushIfAnyMessages(p)
-		p.client.disconnectBroker(bp.broker)
-		close(bp.flushNow)
-		close(bp.hasMessages)
-		close(bp.done)
-	}()
-	wg.Wait() // don't return until the G has started
-
-	return bp
-}
-
-func (bp *brokerProducer) addMessage(msg *produceMessage, maxBufferBytes uint32) {
-	bp.mapM.Lock()
-	if msg.retried {
-		// Prepend: Deliver first, before any more recently-added messages.
-		bp.messages[msg.tp] = append([]*produceMessage{msg}, bp.messages[msg.tp]...)
-	} else {
-		// Append
-		bp.messages[msg.tp] = append(bp.messages[msg.tp], msg)
-	}
-	bp.bufferedBytes += msg.byteSize()
-
-	select {
-	case bp.hasMessages <- true:
-	default:
-	}
-
-	bp.mapM.Unlock()
-	bp.flushIfOverCapacity(maxBufferBytes)
-}
-
-func (bp *brokerProducer) flushIfOverCapacity(maxBufferBytes uint32) {
-	if bp.bufferedBytes > maxBufferBytes {
-		select {
-		case bp.flushNow <- true:
-		default:
-		}
-	}
-}
-
-func (bp *brokerProducer) flushIfAnyMessages(p *Producer) (shutdownRequired bool) {
-	select {
-	case <-bp.hasMessages:
-		select {
-		case bp.hasMessages <- true:
-		default:
-		}
-		return bp.flush(p)
-	default:
-	}
-	return false
-}
-
-func (bp *brokerProducer) flush(p *Producer) (shutdownRequired bool) {
-	var prb produceRequestBuilder
-
-	// only deliver messages for topic-partitions that are not currently being delivered.
-	bp.mapM.Lock()
-	for tp, messages := range bp.messages {
-		if len(messages) > 0 && p.tryAcquireDeliveryLock(tp) {
-			prb = append(prb, messages...)
-			delete(bp.messages, tp)
-			p.releaseDeliveryLock(tp)
-		}
-	}
-	bp.mapM.Unlock()
-
-	if len(prb) > 0 {
-		bp.mapM.Lock()
-		bp.bufferedBytes -= prb.byteSize()
-		bp.mapM.Unlock()
-
-		return bp.flushRequest(p, prb, func(err error) {
-			if err != nil {
-				Logger.Println(err)
-			}
-			p.errors <- err
-		})
-	}
-	return false
-}
-
-func (bp *brokerProducer) flushRequest(p *Producer, prb produceRequestBuilder, errorCb func(error)) (shutdownRequired bool) {
-	// produce_message.go
-	req := prb.toRequest(&p.config)
-	response, err := bp.broker.Produce(p.client.id, req)
-
-	switch err {
-	case nil:
-		break
-	case EncodingError:
-		// No sense in retrying; it'll just fail again. But what about all the other
-		// messages that weren't invalid? Really, this is a "shit's broke real good"
-		// scenario, so logging it and moving on is probably acceptable.
-		errorCb(err)
-		return false
-	default:
-		overlimit := 0
-		prb.reverseEach(func(msg *produceMessage) {
-			if err := msg.reenqueue(p); err != nil {
-				overlimit++
-			}
-		})
-		if overlimit > 0 {
-			errorCb(DroppedMessagesError{overlimit, nil})
-		}
-		return true
-	}
-
-	// When does this ever actually happen, and why don't we explode when it does?
-	// This seems bad.
-	if response == nil {
-		errorCb(nil)
-		return false
-	}
-
-	for topic, d := range response.Blocks {
-		for partition, block := range d {
-			if block == nil {
-				// IncompleteResponse. Here we just drop all the messages; we don't know whether
-				// they were successfully sent or not. Non-ideal, but how often does it happen?
-				errorCb(DroppedMessagesError{len(prb), IncompleteResponse})
-				continue // without this, block.Err below would dereference a nil block
-			}
-			switch block.Err {
-			case NoError:
-				// All the messages for this topic-partition were delivered successfully!
-				// Unlock delivery for this topic-partition and discard the produceMessage objects.
-				errorCb(nil)
-			case UnknownTopicOrPartition, NotLeaderForPartition, LeaderNotAvailable:
-				p.client.RefreshTopicMetadata(topic)
-
-				overlimit := 0
-				prb.reverseEach(func(msg *produceMessage) {
-					if msg.hasTopicPartition(topic, partition) {
-						if err := msg.reenqueue(p); err != nil {
-							overlimit++
-						}
-					}
-				})
-				if overlimit > 0 {
-					errorCb(DroppedMessagesError{overlimit, nil})
-				}
-			default:
-				errorCb(DroppedMessagesError{len(prb), block.Err})
-			}
-		}
-	}
-	return false
-}
-
-func (bp *brokerProducer) Close() error {
-	select {
-	case <-bp.stopper:
-		return fmt.Errorf("already closed or closing")
-	default:
-		close(bp.stopper)
-		<-bp.done
-	}
-	return nil
-}
-
-func (p *Producer) tryAcquireDeliveryLock(tp topicPartition) bool {
-	p.dm.RLock()
-	ch, ok := p.deliveryLocks[tp]
-	p.dm.RUnlock()
-	if !ok {
-		p.dm.Lock()
-		ch, ok = p.deliveryLocks[tp]
-		if !ok {
-			ch = make(chan bool, 1)
-			p.deliveryLocks[tp] = ch
-		}
-		p.dm.Unlock()
-	}
-
-	select {
-	case ch <- true:
-		return true
-	default:
-		return false
-	}
-}
-
-func (p *Producer) releaseDeliveryLock(tp topicPartition) {
-	p.dm.RLock()
-	ch := p.deliveryLocks[tp]
-	p.dm.RUnlock()
-	select {
-	case <-ch:
-	default:
-		panic("Serious logic bug: releaseDeliveryLock called without acquiring lock first.")
-	}
-}
-
-func (p *Producer) choosePartition(topic string, key Encoder) (int32, error) {
-	partitions, err := p.client.Partitions(topic)
-	if err != nil {
-		return -1, err
-	}
-
-	numPartitions := int32(len(partitions))
-
-	if numPartitions == 0 {
-		return -1, LeaderNotAvailable
-	}
-
-	choice := p.config.Partitioner.Partition(key, numPartitions)
-
-	if choice < 0 || choice >= numPartitions {
-		return -1, InvalidPartition
-	}
-
-	return partitions[choice], nil
-}
-
-// NewProducerConfig creates a new ProducerConfig instance with sensible defaults.
-func NewProducerConfig() *ProducerConfig {
-	return &ProducerConfig{
-		Partitioner:  NewRandomPartitioner(),
-		RequiredAcks: WaitForLocal,
-	}
-}
-
-// Validate checks a ProducerConfig instance. It returns a
-// ConfigurationError if any of the specified values don't make sense.
-func (config *ProducerConfig) Validate() error {
-	if config.RequiredAcks < -1 {
-		return ConfigurationError("Invalid RequiredAcks")
-	}
-
-	if config.Timeout < 0 {
-		return ConfigurationError("Invalid Timeout")
-	}
-
-	if config.MaxBufferedBytes == 0 {
-		return ConfigurationError("Invalid MaxBufferedBytes")
-	}
-
-	if config.MaxBufferTime == 0 {
-		return ConfigurationError("Invalid MaxBufferTime")
-	}
-
-	if config.Partitioner == nil {
-		return ConfigurationError("No partitioner set")
-	}
-
-	return nil
-}

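Tying the above together, a hedged sketch of the asynchronous pattern the QueueMessage comments insist on: every producer needs a reader on Errors(), or deliveries stall once the channel's small buffer fills.

func produceAsync(p *Producer, topic string, values []string) error {
	// Drain delivery results for the life of the producer. Errors() is never
	// closed by this version, so the goroutine ends with the process.
	go func() {
		for err := range p.Errors() {
			if err != nil {
				Logger.Println("delivery failed:", err)
			}
		}
	}()

	for _, v := range values {
		if err := p.QueueMessage(topic, nil, StringEncoder(v)); err != nil {
			return err // construction-time failure; delivery errors arrive above
		}
	}
	return nil
}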
+ 0 - 344
producer_test.go

@@ -1,344 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"testing"
-	"time"
-)
-
-const TestMessage = "ABC THE MESSAGE"
-
-func defaultProducerConfig() *ProducerConfig {
-	config := NewProducerConfig()
-	config.MaxBufferTime = 1000000                                // don't flush based on time
-	config.MaxBufferedBytes = uint32((len(TestMessage) * 10) - 1) // flush after 10 messages
-	return config
-}
-
-func TestSimpleProducer(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-	defer mb1.Close()
-	defer mb2.Close()
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddTopicPartition("my_topic", 0, 2)
-	mb1.Returns(mdr)
-
-	pr := new(ProduceResponse)
-	pr.AddTopicPartition("my_topic", 0, NoError)
-	mb2.Returns(pr)
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	producer, err := NewProducer(client, defaultProducerConfig())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer producer.Close()
-
-	// flush only on 10th and final message
-	returns := []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
-	for _, f := range returns {
-		sendMessage(t, producer, "my_topic", TestMessage, f)
-	}
-}
-
-func TestSimpleSyncProducer(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-	defer mb1.Close()
-	defer mb2.Close()
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddTopicPartition("my_topic", 1, 2)
-	mb1.Returns(mdr)
-
-	pr := new(ProduceResponse)
-	pr.AddTopicPartition("my_topic", 1, NoError)
-
-	for i := 0; i < 10; i++ {
-		mb2.Returns(pr)
-	}
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	producer, err := NewProducer(client, defaultProducerConfig())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer producer.Close()
-
-	for i := 0; i < 10; i++ {
-		sendSyncMessage(t, producer, "my_topic", TestMessage)
-	}
-}
-
-func TestMultipleFlushes(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-	defer mb1.Close()
-	defer mb2.Close()
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddTopicPartition("my_topic", 0, 2)
-	mb1.Returns(mdr)
-
-	pr := new(ProduceResponse)
-	pr.AddTopicPartition("my_topic", 0, NoError)
-	pr.AddTopicPartition("my_topic", 0, NoError)
-	mb2.Returns(pr)
-	mb2.Returns(pr) // yes, twice.
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	config := defaultProducerConfig()
-	// So that we flush after the 2nd message.
-	config.MaxBufferedBytes = uint32((len(TestMessage) * 5) - 1)
-	producer, err := NewProducer(client, config)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer producer.Close()
-
-	returns := []int{0, 0, 0, 0, 1, 0, 0, 0, 0, 1}
-	for _, f := range returns {
-		sendMessage(t, producer, "my_topic", TestMessage, f)
-	}
-}
-
-func TestMultipleProducer(t *testing.T) {
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-	mb3 := NewMockBroker(t, 3)
-	defer mb1.Close()
-	defer mb2.Close()
-	defer mb3.Close()
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddBroker(mb3.Addr(), int32(mb3.BrokerID()))
-	mdr.AddTopicPartition("topic_a", 0, 2)
-	mdr.AddTopicPartition("topic_b", 0, 3)
-	mdr.AddTopicPartition("topic_c", 0, 3)
-	mb1.Returns(mdr)
-
-	pr1 := new(ProduceResponse)
-	pr1.AddTopicPartition("topic_a", 0, NoError)
-	mb2.Returns(pr1)
-
-	pr2 := new(ProduceResponse)
-	pr2.AddTopicPartition("topic_b", 0, NoError)
-	pr2.AddTopicPartition("topic_c", 0, NoError)
-	mb3.Returns(pr2)
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	producer, err := NewProducer(client, defaultProducerConfig())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer producer.Close()
-
-	// flush only on 10th and final message
-	returns := []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
-	for _, f := range returns {
-		sendMessage(t, producer, "topic_a", TestMessage, f)
-	}
-
-	// no flushes
-	returns = []int{0, 0, 0, 0, 0}
-	for _, f := range returns {
-		sendMessage(t, producer, "topic_b", TestMessage, f)
-	}
-
-	// flush both topic_b and topic_c on 5th (ie. 10th for this broker)
-	returns = []int{0, 0, 0, 0, 2}
-	for _, f := range returns {
-		sendMessage(t, producer, "topic_c", TestMessage, f)
-	}
-}
-
-// Here we test that when two messages are sent in the same buffered request,
-// and more messages are enqueued while the request is pending, everything
-// happens correctly; that is, the first messages are retried before the next
-// batch is allowed to submit.
-func TestFailureRetry(t *testing.T) {
-	t.Skip("not yet working after mockbroker refactor")
-
-	mb1 := NewMockBroker(t, 1)
-	mb2 := NewMockBroker(t, 2)
-	mb3 := NewMockBroker(t, 3)
-
-	mdr := new(MetadataResponse)
-	mdr.AddBroker(mb2.Addr(), int32(mb2.BrokerID()))
-	mdr.AddBroker(mb3.Addr(), int32(mb3.BrokerID()))
-	mdr.AddTopicPartition("topic_a", 0, 2)
-	mdr.AddTopicPartition("topic_b", 0, 3)
-	mdr.AddTopicPartition("topic_c", 0, 3)
-	mb1.Returns(mdr)
-
-	/* mb1.ExpectMetadataRequest(). */
-	/* 	AddBroker(mb2). */
-	/* 	AddBroker(mb3). */
-	/* 	AddTopicPartition("topic_a", 0, 2). */
-	/* 	AddTopicPartition("topic_b", 0, 2). */
-	/* 	AddTopicPartition("topic_c", 0, 3) */
-
-	pr := new(ProduceResponse)
-	pr.AddTopicPartition("topic_a", 0, NoError)
-	pr.AddTopicPartition("topic_b", 0, NotLeaderForPartition)
-	mb2.Returns(pr)
-
-	/* mb2.ExpectProduceRequest(). */
-	/* 	AddTopicPartition("topic_a", 0, 1, NoError). */
-	/* 	AddTopicPartition("topic_b", 0, 1, NotLeaderForPartition) */
-
-	// The fact that mb2 is chosen here is not well-defined. In theory,
-	// it's a random choice between mb1, mb2, and mb3. Go's hash iteration
-	// isn't quite as random as claimed, though, it seems. Maybe because
-	// the same random seed is used each time?
-	mdr2 := new(MetadataResponse)
-	mdr2.AddBroker(mb3.Addr(), int32(mb3.BrokerID()))
-	mdr2.AddTopicPartition("topic_b", 0, 3)
-	mb2.Returns(mdr2)
-
-	/* mb2.ExpectMetadataRequest(). */
-	/* 	AddBroker(mb3). */
-	/* 	AddTopicPartition("topic_b", 0, 3) */
-
-	pr2 := new(ProduceResponse)
-	pr2.AddTopicPartition("topic_c", 0, NoError)
-	pr2.AddTopicPartition("topic_b", 0, NoError)
-	mb3.Returns(pr2)
-
-	/* mb3.ExpectProduceRequest(). */
-	/* 	AddTopicPartition("topic_c", 0, 1, NoError). */
-	/* 	AddTopicPartition("topic_b", 0, 1, NoError) */
-
-	client, err := NewClient("client_id", []string{mb1.Addr()}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-
-	producer, err := NewProducer(client, defaultProducerConfig())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer producer.Close()
-
-	// Sent to mb3; does not flush because it's only half the cap.
-	// mb1: [__]
-	// mb2: [__]
-	// mb3: [__]
-	sendMessage(t, producer, "topic_c", TestMessage, 0)
-	// mb1: [__]
-	// mb2: [__]
-	// mb3: [X_]
-
-	// Sent to mb2; does not flush because it's only half the cap.
-	sendMessage(t, producer, "topic_a", TestMessage, 0)
-	// mb1: [__]
-	// mb2: [X_]
-	// mb3: [X_]
-
-	// Sent to mb2; flushes, errors (retriable).
-	// Three messages will be received:
-	//   * NoError for topic_a;
-	//   * NoError for topic_b;
-	//   * NoError for topic_c.
-	sendMessage(t, producer, "topic_b", TestMessage, 2)
-	// mb1: [__]
-	// mb2: [XX] <- flush!
-	// mb3: [X_]
-
-	// The topic_b message errors, and we should wind up here:
-
-	// mb1: [__]
-	// mb2: [__]
-	// mb3: [XX] <- topic_b reassigned to mb3 by metadata refresh, flushes.
-
-	defer mb1.Close()
-	defer mb2.Close()
-}
-
-func readMessage(t *testing.T, ch chan error) {
-	select {
-	case err := <-ch:
-		if err != nil {
-			t.Error(err)
-		}
-	case <-time.After(1 * time.Second):
-		t.Error(fmt.Errorf("Message was never received"))
-	}
-}
-
-func assertNoMessages(t *testing.T, ch chan error) {
-	select {
-	case x := <-ch:
-		t.Fatal(fmt.Errorf("unexpected value received: %#v", x))
-	case <-time.After(1 * time.Millisecond):
-	}
-}
-
-func ExampleProducer() {
-	client, err := NewClient("client_id", []string{"localhost:9092"}, NewClientConfig())
-	if err != nil {
-		panic(err)
-	} else {
-		fmt.Println("> connected")
-	}
-	defer client.Close()
-
-	producer, err := NewProducer(client, nil)
-	if err != nil {
-		panic(err)
-	}
-	defer producer.Close()
-
-	err = producer.SendMessage("my_topic", nil, StringEncoder("testing 123"))
-	if err != nil {
-		panic(err)
-	} else {
-		fmt.Println("> message sent")
-	}
-}
-
-func sendMessage(t *testing.T, producer *Producer, topic string, key string, expectedResponses int) {
-	err := producer.QueueMessage(topic, nil, StringEncoder(key))
-	if err != nil {
-		t.Error(err)
-	}
-	for i := 0; i < expectedResponses; i++ {
-		readMessage(t, producer.Errors())
-	}
-	assertNoMessages(t, producer.Errors())
-}
-
-func sendSyncMessage(t *testing.T, producer *Producer, topic string, key string) {
-	err := producer.SendMessage(topic, nil, StringEncoder(key))
-	if err != nil {
-		t.Error(err)
-	}
-	assertNoMessages(t, producer.Errors())
-}

+ 0 - 225
real_decoder.go

@@ -1,225 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"math"
-)
-
-type realDecoder struct {
-	raw   []byte
-	off   int
-	stack []pushDecoder
-}
-
-// primitives
-
-func (rd *realDecoder) getInt8() (int8, error) {
-	if rd.remaining() < 1 {
-		rd.off = len(rd.raw)
-		return -1, InsufficientData
-	}
-	tmp := int8(rd.raw[rd.off])
-	rd.off += binary.Size(tmp)
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt16() (int16, error) {
-	if rd.remaining() < 2 {
-		rd.off = len(rd.raw)
-		return -1, InsufficientData
-	}
-	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
-	rd.off += binary.Size(tmp)
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt32() (int32, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return -1, InsufficientData
-	}
-	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += binary.Size(tmp)
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt64() (int64, error) {
-	if rd.remaining() < 8 {
-		rd.off = len(rd.raw)
-		return -1, InsufficientData
-	}
-	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-	rd.off += binary.Size(tmp)
-	return tmp, nil
-}
-
-func (rd *realDecoder) getArrayLength() (int, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return -1, InsufficientData
-	}
-	tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-	if tmp > rd.remaining() {
-		rd.off = len(rd.raw)
-		return -1, InsufficientData
-	} else if tmp > 2*math.MaxUint16 {
-		return -1, DecodingError{Info: "getArrayLength failed: Invalid array length"}
-	}
-	return tmp, nil
-}
-
-// collections
-
-func (rd *realDecoder) getBytes() ([]byte, error) {
-	tmp, err := rd.getInt32()
-
-	if err != nil {
-		return nil, err
-	}
-
-	n := int(tmp)
-
-	switch {
-	case n < -1:
-		return nil, DecodingError{Info: "getBytes failed"}
-	case n == -1:
-		return nil, nil
-	case n == 0:
-		return make([]byte, 0), nil
-	case n > rd.remaining():
-		rd.off = len(rd.raw)
-		return nil, InsufficientData
-	}
-
-	tmpStr := rd.raw[rd.off : rd.off+n]
-	rd.off += n
-	return tmpStr, nil
-}
-
-func (rd *realDecoder) getString() (string, error) {
-	tmp, err := rd.getInt16()
-
-	if err != nil {
-		return "", err
-	}
-
-	n := int(tmp)
-
-	switch {
-	case n < -1:
-		return "", DecodingError{Info: "getString failed"}
-	case n == -1:
-		return "", nil
-	case n == 0:
-		return "", nil
-	case n > rd.remaining():
-		rd.off = len(rd.raw)
-		return "", InsufficientData
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+n])
-	rd.off += n
-	return tmpStr, nil
-}
-
-func (rd *realDecoder) getInt32Array() ([]int32, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, InsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if rd.remaining() < 4*n {
-		rd.off = len(rd.raw)
-		return nil, InsufficientData
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, DecodingError{Info: "getInt32Array failed"}
-	}
-
-	ret := make([]int32, n)
-	for i := range ret {
-		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-		rd.off += binary.Size(ret[i])
-	}
-	return ret, nil
-}
-
-func (rd *realDecoder) getInt64Array() ([]int64, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, InsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if rd.remaining() < 8*n {
-		rd.off = len(rd.raw)
-		return nil, InsufficientData
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, DecodingError{Info: "getInt64Array failed"}
-	}
-
-	ret := make([]int64, n)
-	for i := range ret {
-		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-		rd.off += binary.Size(ret[i])
-	}
-	return ret, nil
-}
-
-// subsets
-
-func (rd *realDecoder) remaining() int {
-	return len(rd.raw) - rd.off
-}
-
-func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
-	if length > rd.remaining() {
-		rd.off = len(rd.raw)
-		return nil, InsufficientData
-	}
-
-	start := rd.off
-	rd.off += length
-	return &realDecoder{raw: rd.raw[start:rd.off]}, nil
-}
-
-// stacks
-
-func (rd *realDecoder) push(in pushDecoder) error {
-	in.saveOffset(rd.off)
-
-	reserve := in.reserveLength()
-	if rd.remaining() < reserve {
-		rd.off = len(rd.raw)
-		return InsufficientData
-	}
-
-	rd.stack = append(rd.stack, in)
-
-	rd.off += reserve
-
-	return nil
-}
-
-func (rd *realDecoder) pop() error {
-	// this is go's ugly pop pattern (the inverse of append)
-	in := rd.stack[len(rd.stack)-1]
-	rd.stack = rd.stack[:len(rd.stack)-1]
-
-	return in.check(rd.off, rd.raw)
-}

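realDecoder is driven much like the encoders: wrap the raw bytes, hand the struct its packetDecoder, and insist everything was consumed. A sketch, again assuming the one-method decoder interface from the deleted encoder_decoder.go:

func decodeBytes(buf []byte, out decoder) error {
	if buf == nil {
		return nil
	}
	rd := &realDecoder{raw: buf}
	if err := out.decode(rd); err != nil {
		return err
	}
	if rd.remaining() != 0 {
		return DecodingError{Info: "unexpected trailing bytes"}
	}
	return nil
}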
+ 0 - 94
real_encoder.go

@@ -1,94 +0,0 @@
-package sarama
-
-import "encoding/binary"
-
-type realEncoder struct {
-	raw   []byte
-	off   int
-	stack []pushEncoder
-}
-
-// primitives
-
-func (re *realEncoder) putInt8(in int8) {
-	re.raw[re.off] = byte(in)
-	re.off += binary.Size(in)
-}
-
-func (re *realEncoder) putInt16(in int16) {
-	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
-	re.off += binary.Size(in)
-}
-
-func (re *realEncoder) putInt32(in int32) {
-	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
-	re.off += binary.Size(in)
-}
-
-func (re *realEncoder) putInt64(in int64) {
-	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
-	re.off += binary.Size(in)
-}
-
-func (re *realEncoder) putArrayLength(in int) error {
-	re.putInt32(int32(in))
-	return nil
-}
-
-// collection
-
-func (re *realEncoder) putRawBytes(in []byte) error {
-	copy(re.raw[re.off:], in)
-	re.off += len(in)
-	return nil
-}
-
-func (re *realEncoder) putBytes(in []byte) error {
-	if in == nil {
-		re.putInt32(-1)
-		return nil
-	}
-	re.putInt32(int32(len(in)))
-	copy(re.raw[re.off:], in)
-	re.off += len(in)
-	return nil
-}
-
-func (re *realEncoder) putString(in string) error {
-	re.putInt16(int16(len(in)))
-	copy(re.raw[re.off:], in)
-	re.off += len(in)
-	return nil
-}
-
-func (re *realEncoder) putInt32Array(in []int32) error {
-	re.putArrayLength(len(in))
-	for _, val := range in {
-		re.putInt32(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putInt64Array(in []int64) error {
-	re.putArrayLength(len(in))
-	for _, val := range in {
-		re.putInt64(val)
-	}
-	return nil
-}
-
-// stacks
-
-func (re *realEncoder) push(in pushEncoder) {
-	in.saveOffset(re.off)
-	re.off += in.reserveLength()
-	re.stack = append(re.stack, in)
-}
-
-func (re *realEncoder) pop() error {
-	// this is go's ugly pop pattern (the inverse of append)
-	in := re.stack[len(re.stack)-1]
-	re.stack = re.stack[:len(re.stack)-1]
-
-	return in.run(re.off, re.raw)
-}

+ 0 - 29
request.go

@@ -1,29 +0,0 @@
-package sarama
-
-type requestEncoder interface {
-	encoder
-	key() int16
-	version() int16
-}
-
-type request struct {
-	correlationID int32
-	id            string
-	body          requestEncoder
-}
-
-func (r *request) encode(pe packetEncoder) (err error) {
-	pe.push(&lengthField{})
-	pe.putInt16(r.body.key())
-	pe.putInt16(r.body.version())
-	pe.putInt32(r.correlationID)
-	err = pe.putString(r.id)
-	if err != nil {
-		return err
-	}
-	err = r.body.encode(pe)
-	if err != nil {
-		return err
-	}
-	return pe.pop()
-}

+ 0 - 55
request_test.go

@@ -1,55 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"testing"
-)
-
-var (
-	requestSimple = []byte{
-		0x00, 0x00, 0x00, 0x17, // msglen
-		0x06, 0x66,
-		0x00, 0xD2,
-		0x00, 0x00, 0x12, 0x34,
-		0x00, 0x08, 'm', 'y', 'C', 'l', 'i', 'e', 'n', 't',
-		0x00, 0x03, 'a', 'b', 'c'}
-)
-
-type testRequestBody struct {
-}
-
-func (s *testRequestBody) key() int16 {
-	return 0x666
-}
-
-func (s *testRequestBody) version() int16 {
-	return 0xD2
-}
-
-func (s *testRequestBody) encode(pe packetEncoder) error {
-	return pe.putString("abc")
-}
-
-func TestRequest(t *testing.T) {
-	request := request{correlationID: 0x1234, id: "myClient", body: new(testRequestBody)}
-	testEncodable(t, "simple", &request, requestSimple)
-}
-
-// not specific to request tests, just helper functions for testing structures that
-// implement the encoder or decoder interfaces that needed somewhere to live
-
-func testEncodable(t *testing.T, name string, in encoder, expect []byte) {
-	packet, err := encode(in)
-	if err != nil {
-		t.Error(err)
-	} else if !bytes.Equal(packet, expect) {
-		t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect)
-	}
-}
-
-func testDecodable(t *testing.T, name string, out decoder, in []byte) {
-	err := decode(in, out)
-	if err != nil {
-		t.Error("Decoding", name, "failed:", err)
-	}
-}

+ 0 - 23
response_header.go

@@ -1,23 +0,0 @@
-package sarama
-
-import "fmt"
-
-type responseHeader struct {
-	length        int32
-	correlationID int32
-}
-
-const maxMessageSize = 32 * 1024 * 1024 // 32MB
-
-func (r *responseHeader) decode(pd packetDecoder) (err error) {
-	r.length, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if r.length <= 4 || r.length > maxMessageSize {
-		return DecodingError{Info: fmt.Sprintf("Message too large or too small. Got %d", r.length)}
-	}
-
-	r.correlationID, err = pd.getInt32()
-	return err
-}

+ 0 - 21
response_header_test.go

@@ -1,21 +0,0 @@
-package sarama
-
-import "testing"
-
-var (
-	responseHeaderBytes = []byte{
-		0x00, 0x00, 0x0f, 0x00,
-		0x0a, 0xbb, 0xcc, 0xff}
-)
-
-func TestResponseHeader(t *testing.T) {
-	header := responseHeader{}
-
-	testDecodable(t, "response header", &header, responseHeaderBytes)
-	if header.length != 0xf00 {
-		t.Error("Decoding header length failed, got", header.length)
-	}
-	if header.correlationID != 0x0abbccff {
-		t.Error("Decoding header correlation id failed, got", header.correlationID)
-	}
-}

+ 0 - 21
sarama.go

@@ -1,21 +0,0 @@
-/*
-Package sarama provides client libraries for the Kafka 0.8 protocol. The Client, Producer and Consumer objects are the core of the high-level API. The Broker and Request/Response objects permit more precise control.
-
-The Request/Response objects and properties are mostly undocumented, as they line up exactly with the
-protocol fields documented by Kafka at https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-*/
-package sarama
-
-import (
-	"io/ioutil"
-	"log"
-)
-
-// Logger is the instance of golang's log.Logger that Sarama writes connection
-// management events to. By default it is set to discard all log messages via ioutil.Discard,
-// but you can set it to redirect wherever you want.
-var Logger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
-
-// PanicHandler is called for recovering from panics spawned internally to the library (and thus
-// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
-var PanicHandler func(interface{})

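Redirecting the logger is a one-liner in client code; a usage sketch (the import path is illustrative):

import (
	"log"
	"os"

	"github.com/Shopify/sarama" // adjust to wherever this package is vendored
)

func init() {
	// Surface Sarama's connection-management events instead of discarding them.
	sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)
}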
+ 0 - 36
snappy.go

@@ -1,36 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"code.google.com/p/snappy-go/snappy"
-	"encoding/binary"
-	_ "fmt"
-)
-
-var snappyMagic = []byte{130, 83, 78, 65, 80, 80, 89, 0}
-
-// SnappyEncode encodes binary data
-func SnappyEncode(src []byte) ([]byte, error) {
-	return snappy.Encode(nil, src)
-}
-
-// SnappyDecode decodes snappy data
-func SnappyDecode(src []byte) ([]byte, error) {
-	// Guard against inputs shorter than the magic header before comparing.
-	if len(src) >= 8 && bytes.Equal(src[:8], snappyMagic) {
-		pos := uint32(16)
-		max := uint32(len(src))
-		dst := make([]byte, 0)
-		for pos < max {
-			size := binary.BigEndian.Uint32(src[pos : pos+4])
-			pos = pos + 4
-			chunk, err := snappy.Decode(nil, src[pos:pos+size])
-			if err != nil {
-				return nil, err
-			}
-			pos = pos + size
-			dst = append(dst, chunk...)
-		}
-		return dst, nil
-	}
-	return snappy.Decode(nil, src)
-}

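The magic-header branch above handles the framed ("xerial"-style) format some JVM producers emit: an 8-byte magic, 8 further header bytes, then 4-byte length-prefixed snappy chunks. Plain payloads round-trip through the direct path; a sketch:

func snappyRoundTrip(payload []byte) ([]byte, error) {
	enc, err := SnappyEncode(payload) // produces plain, unframed snappy
	if err != nil {
		return nil, err
	}
	return SnappyDecode(enc) // no magic header, so the direct path runs
}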
The diff for this file is not shown because it is too large
+ 0 - 16
snappy_test.go


+ 0 - 53
utils.go

@@ -1,53 +0,0 @@
-package sarama
-
-// make []int32 sortable so we can sort partition numbers
-type int32Slice []int32
-
-func (slice int32Slice) Len() int {
-	return len(slice)
-}
-
-func (slice int32Slice) Less(i, j int) bool {
-	return slice[i] < slice[j]
-}
-
-func (slice int32Slice) Swap(i, j int) {
-	slice[i], slice[j] = slice[j], slice[i]
-}
-
-func withRecover(fn func()) {
-	defer func() {
-		if PanicHandler != nil {
-			if err := recover(); err != nil {
-				PanicHandler(err)
-			}
-		}
-	}()
-
-	fn()
-}
-
-// Encoder is a simple interface for any type that can be encoded as an array of bytes
-// in order to be sent as the key or value of a Kafka message.
-type Encoder interface {
-	Encode() ([]byte, error)
-}
-
-// make strings and byte slices encodable for convenience so they can be used as keys
-// and/or values in kafka messages
-
-// StringEncoder implements the Encoder interface for Go strings so that you can do things like
-//	producer.SendMessage(nil, sarama.StringEncoder("hello world"))
-type StringEncoder string
-
-func (s StringEncoder) Encode() ([]byte, error) {
-	return []byte(s), nil
-}
-
-// ByteEncoder implements the Encoder interface for Go byte slices so that you can do things like
-//	producer.SendMessage(nil, sarama.ByteEncoder([]byte{0x00}))
-type ByteEncoder []byte
-
-func (b ByteEncoder) Encode() ([]byte, error) {
-	return b, nil
-}

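Beyond the two built-ins, any type with an Encode method plugs into the same key/value slots. A hypothetical JSON encoder (not part of the deleted code), using encoding/json:

import "encoding/json"

// jsonEncoder defers marshalling until the message is actually built.
type jsonEncoder struct {
	val interface{}
}

func (j jsonEncoder) Encode() ([]byte, error) {
	return json.Marshal(j.val)
}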
Some files were not shown because too many files changed in this diff