Pierre Grimaud 4 years ago
parent
Commit
31886822ab

+ 1 - 1
acl_create_response.go

@@ -2,7 +2,7 @@ package sarama
 
 import "time"
 
-//CreateAclsResponse is a an acl reponse creation type
+//CreateAclsResponse is an acl response creation type
 type CreateAclsResponse struct {
 	ThrottleTime         time.Duration
 	AclCreationResponses []*AclCreationResponse

+ 2 - 2
alter_configs_response.go

@@ -2,13 +2,13 @@ package sarama
 
 import "time"
 
-//AlterConfigsResponse is a reponse type for alter config
+//AlterConfigsResponse is a response type for alter config
 type AlterConfigsResponse struct {
 	ThrottleTime time.Duration
 	Resources    []*AlterConfigsResourceResponse
 }
 
-//AlterConfigsResourceResponse is a reponse type for alter config resource
+//AlterConfigsResourceResponse is a response type for alter config resource
 type AlterConfigsResourceResponse struct {
 	ErrorCode int16
 	ErrorMsg  string

+ 3 - 3
alter_partition_reassignments_request_test.go

@@ -4,13 +4,13 @@ import "testing"
 
 var (
 	alterPartitionReassignmentsRequestNoBlock = []byte{
-		0, 0, 39, 16, // timout 10000
+		0, 0, 39, 16, // timeout 10000
 		1, // 1-1=0 blocks
 		0, // empty tagged fields
 	}
 
 	alterPartitionReassignmentsRequestOneBlock = []byte{
-		0, 0, 39, 16, // timout 10000
+		0, 0, 39, 16, // timeout 10000
 		2,                         // 2-1=1 block
 		6, 116, 111, 112, 105, 99, // topic name "topic" as compact string
 		2,          // 2-1=1 partitions
@@ -22,7 +22,7 @@ var (
 	}
 
 	alterPartitionReassignmentsAbortRequest = []byte{
-		0, 0, 39, 16, // timout 10000
+		0, 0, 39, 16, // timeout 10000
 		2,                         // 2-1=1 block
 		6, 116, 111, 112, 105, 99, // topic name "topic" as compact string
 		2,          // 2-1=1 partitions
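These test fixtures follow Kafka's flexible-version wire format: "compact" arrays and strings store length+1 as an unsigned varint, which is why a header byte of 2 means 2-1=1 block and the five-byte topic name "topic" is prefixed with 6. A minimal standalone Go sketch of that layout (an illustration, not Sarama's internal encoder):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeCompactString prefixes s with len(s)+1 as an unsigned varint,
// mirroring the compact-string bytes in the fixtures above.
func encodeCompactString(s string) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(len(s)+1))
	return append(buf[:n], s...)
}

func main() {
	fmt.Println(encodeCompactString("topic")) // [6 116 111 112 105 99]
}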

+ 1 - 1
api_versions_response.go

@@ -1,6 +1,6 @@
 package sarama
 
-//ApiVersionsResponseBlock is an api version reponse block type
+//ApiVersionsResponseBlock is an api version response block type
 type ApiVersionsResponseBlock struct {
 	ApiKey     int16
 	MinVersion int16

+ 3 - 3
broker.go

@@ -73,7 +73,7 @@ const (
 	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
 	SASLHandshakeV1 = int16(1)
 	// SASLExtKeyAuth is the reserved extension key name sent as part of the
-	// SASL/OAUTHBEARER intial client response
+	// SASL/OAUTHBEARER initial client response
 	SASLExtKeyAuth = "auth"
 )
 
@@ -369,7 +369,7 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
 	return response, nil
 }
 
-//CommitOffset return an Offset commit reponse or error
+//CommitOffset returns an Offset commit response or error
 func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
 	response := new(OffsetCommitResponse)
 
@@ -1014,7 +1014,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int
 // When credentials are invalid, Kafka replies with a SaslAuthenticate response
 // containing an error code and message detailing the authentication failure.
 func (b *Broker) sendAndReceiveSASLPlainAuth() error {
-	// default to V0 to allow for backward compatability when SASL is enabled
+	// default to V0 to allow for backward compatibility when SASL is enabled
 	// but not the handshake
 	if b.conf.Net.SASL.Handshake {
 		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
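The fallback that comment describes is driven by client configuration; a minimal sketch of the Sarama settings involved (credentials are placeholders, not part of this commit):

package main

import "github.com/Shopify/sarama"

// newSASLConfig enables SASL/PLAIN. With Handshake set to true, the version
// in Net.SASL.Version is negotiated first; with it false, the code above
// defaults to V0 framing for backward compatibility.
func newSASLConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Handshake = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext
	cfg.Net.SASL.Version = sarama.SASLHandshakeV1
	cfg.Net.SASL.User = "user"     // placeholder
	cfg.Net.SASL.Password = "pass" // placeholder
	return cfg
}

func main() { _ = newSASLConfig() }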

+ 1 - 1
broker_test.go

@@ -295,7 +295,7 @@ func TestSASLSCRAMSHAXXX(t *testing.T) {
 		scramChallengeResp string
 	}{
 		{
-			name:               "SASL/SCRAMSHAXXX successfull authentication",
+			name:               "SASL/SCRAMSHAXXX successful authentication",
 			mockHandshakeErr:   ErrNoError,
 			scramClient:        &MockSCRAMClient{},
 			scramChallengeResp: "pong",

+ 1 - 1
client_test.go

@@ -642,7 +642,7 @@ func TestClientController(t *testing.T) {
 	}
 	defer safeClose(t, client2)
 	if _, err = client2.Controller(); err != ErrUnsupportedVersion {
-		t.Errorf("Expected Contoller() to return %s, found %s", ErrUnsupportedVersion, err)
+		t.Errorf("Expected Controller() to return %s, found %s", ErrUnsupportedVersion, err)
 	}
 }
 

+ 1 - 1
examples/consumergroup/main.go

@@ -28,7 +28,7 @@ func init() {
 	flag.StringVar(&brokers, "brokers", "", "Kafka bootstrap brokers to connect to, as a comma separated list")
 	flag.StringVar(&group, "group", "", "Kafka consumer group definition")
 	flag.StringVar(&version, "version", "2.1.1", "Kafka cluster version")
-	flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma seperated list")
+	flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma separated list")
 	flag.StringVar(&assignor, "assignor", "range", "Consumer group partition assignment strategy (range, roundrobin, sticky)")
 	flag.BoolVar(&oldest, "oldest", true, "Kafka consumer consume initial offset from oldest")
 	flag.BoolVar(&verbose, "verbose", false, "Sarama logging")
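Both the -brokers and -topics flags above take comma separated lists, which a tool like this would split before use; an illustrative fragment (stand-in values, not the example's exact code):

package main

import (
	"fmt"
	"strings"
)

func main() {
	topics := "t1,t2,t3" // stand-in for the -topics flag value
	fmt.Println(strings.Split(topics, ",")) // [t1 t2 t3]
}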

+ 1 - 1
list_partition_reassignments_request_test.go

@@ -4,7 +4,7 @@ import "testing"
 
 var (
 	listPartitionReassignmentsRequestOneBlock = []byte{
-		0, 0, 39, 16, // timout 10000
+		0, 0, 39, 16, // timeout 10000
 		2,                         // 2-1=1 block
 		6, 116, 111, 112, 105, 99, // topic name "topic" as compact string
 		2,          // 2-1=1 partitions

+ 1 - 1
record_test.go

@@ -283,7 +283,7 @@ func TestRecordBatchDecoding(t *testing.T) {
 			r.length = varintLengthField{}
 		}
 		// The compression level is not restored on decoding. It is not needed
-		// anyway. We only set it here to ensure that comparision succeeds.
+		// anyway. We only set it here to ensure that comparison succeeds.
 		batch.CompressionLevel = tc.batch.CompressionLevel
 		if !reflect.DeepEqual(batch, tc.batch) {
 			t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch))

+ 1 - 1
sasl_authenticate_response_test.go

@@ -17,5 +17,5 @@ func TestSaslAuthenticateResponse(t *testing.T) {
 	response.ErrorMessage = &msg
 	response.SaslAuthBytes = []byte(`msg`)
 
-	testResponse(t, "authenticate reponse", response, saslAuthenticatResponseErr)
+	testResponse(t, "authenticate response", response, saslAuthenticatResponseErr)
 }

+ 1 - 1
tools/kafka-console-producer/README.md

@@ -25,7 +25,7 @@ A simple command line tool to produce a single message to Kafka.
     # Partitioning: by default, kafka-console-producer will partition as follows:
     # - manual partitioning if a -partition is provided
     # - hash partitioning by key if a -key is provided
-    # - random partioning otherwise.
+    # - random partitioning otherwise.
     #
     # You can override this using the -partitioner argument:
     echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random