Browse Source

Fix linter errors

Vlad Gorodetsky 4 years ago
parent
commit
07ff436a6d

+ 1 - 1
.golangci.yml

@@ -8,7 +8,7 @@ linters-settings:
   golint:
     min-confidence: 0
   gocyclo:
-    min-complexity: 95
+    min-complexity: 99
   maligned:
     suggest-new: true
   dupl:

+ 1 - 1
add_partitions_to_txn_request_test.go

@@ -19,7 +19,7 @@ func TestAddPartitionsToTxnRequest(t *testing.T) {
 		ProducerID:      8000,
 		ProducerEpoch:   0,
 		TopicPartitions: map[string][]int32{
-			"topic": []int32{1},
+			"topic": {1},
 		},
 	}
 

+ 1 - 1
add_partitions_to_txn_response_test.go

@@ -20,7 +20,7 @@ func TestAddPartitionsToTxnResponse(t *testing.T) {
 	resp := &AddPartitionsToTxnResponse{
 		ThrottleTime: 100 * time.Millisecond,
 		Errors: map[string][]*PartitionError{
-			"topic": []*PartitionError{&PartitionError{
+			"topic": {{
 				Err:       ErrInvalidTxnState,
 				Partition: 2,
 			}},

+ 0 - 6
admin.go

@@ -444,7 +444,6 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [
 }
 
 func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
-
 	if topic == "" {
 		return ErrInvalidTopic
 	}
@@ -557,7 +556,6 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry,
 }
 
 func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
-
 	var resources []*AlterConfigsResource
 	resources = append(resources, &AlterConfigsResource{
 		Type:          resourceType,
@@ -621,7 +619,6 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
 }
 
 func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
-
 	request := &DescribeAclsRequest{AclFilter: filter}
 
 	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
@@ -669,7 +666,6 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi
 		for _, mACL := range fr.MatchingAcls {
 			mAcls = append(mAcls, *mACL)
 		}
-
 	}
 	return mAcls, nil
 }
@@ -683,7 +679,6 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group
 			return nil, err
 		}
 		groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
-
 	}
 
 	for broker, brokerGroups := range groupsPerBroker {
@@ -726,7 +721,6 @@ func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err e
 			}
 
 			groupMaps <- groups
-
 		}(b, ca.conf)
 	}
 

+ 1 - 5
admin_test.go

@@ -959,7 +959,6 @@ func TestListConsumerGroups(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
 }
 
 func TestListConsumerGroupsMultiBroker(t *testing.T) {
@@ -1024,7 +1023,6 @@ func TestListConsumerGroupsMultiBroker(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
 }
 
 func TestListConsumerGroupOffsets(t *testing.T) {
@@ -1053,7 +1051,7 @@ func TestListConsumerGroupOffsets(t *testing.T) {
 	}
 
 	response, err := admin.ListConsumerGroupOffsets(group, map[string][]int32{
-		topic: []int32{0},
+		topic: {0},
 	})
 	if err != nil {
 		t.Fatalf("ListConsumerGroupOffsets failed with error %v", err)
@@ -1072,7 +1070,6 @@ func TestListConsumerGroupOffsets(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
 }
 
 func TestDeleteConsumerGroup(t *testing.T) {
@@ -1102,7 +1099,6 @@ func TestDeleteConsumerGroup(t *testing.T) {
 	if err != nil {
 		t.Fatalf("DeleteConsumerGroup failed with error %v", err)
 	}
-
 }
 
 // TestRefreshMetaDataWithDifferentController ensures that the cached

+ 3 - 3
alter_configs_request_test.go

@@ -51,7 +51,7 @@ func TestAlterConfigsRequest(t *testing.T) {
 	configValue := "1000"
 	request = &AlterConfigsRequest{
 		Resources: []*AlterConfigsResource{
-			&AlterConfigsResource{
+			{
 				Type: TopicResource,
 				Name: "foo",
 				ConfigEntries: map[string]*string{
@@ -65,14 +65,14 @@ func TestAlterConfigsRequest(t *testing.T) {
 
 	request = &AlterConfigsRequest{
 		Resources: []*AlterConfigsResource{
-			&AlterConfigsResource{
+			{
 				Type: TopicResource,
 				Name: "foo",
 				ConfigEntries: map[string]*string{
 					"segment.ms": &configValue,
 				},
 			},
-			&AlterConfigsResource{
+			{
 				Type: TopicResource,
 				Name: "bar",
 				ConfigEntries: map[string]*string{

+ 1 - 1
alter_configs_response_test.go

@@ -33,7 +33,7 @@ func TestAlterConfigsResponse(t *testing.T) {
 
 	response = &AlterConfigsResponse{
 		Resources: []*AlterConfigsResourceResponse{
-			&AlterConfigsResourceResponse{
+			{
 				ErrorCode: 0,
 				ErrorMsg:  "",
 				Type:      TopicResource,

+ 0 - 1
async_producer_test.go

@@ -328,7 +328,6 @@ func (l *testLogger) Println(v ...interface{}) {
 }
 
 func TestAsyncProducerRecoveryWithRetriesDisabled(t *testing.T) {
-
 	tt := func(t *testing.T, kErr KError) {
 		seedBroker := NewMockBroker(t, 1)
 		leader1 := NewMockBroker(t, 2)

File diff suppressed because it is too large
+ 247 - 247
balance_strategy_test.go


+ 0 - 8
broker.go

@@ -189,7 +189,6 @@ func (b *Broker) Open(conf *Config) error {
 		}
 
 		if conf.Net.SASL.Enable {
-
 			b.connErr = b.authenticateViaSASL()
 
 			if b.connErr != nil {
@@ -961,7 +960,6 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error {
 	// default to V0 to allow for backward compatibility when SASL is enabled
 	// but not the handshake
 	if b.conf.Net.SASL.Handshake {
-
 		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
 		if handshakeErr != nil {
 			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
@@ -977,7 +975,6 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error {
 
 // sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol
 func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
-
 	length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
 	authBytes := make([]byte, length+4) //4 byte length header + auth data
 	binary.BigEndian.PutUint32(authBytes, uint32(length))
@@ -1068,7 +1065,6 @@ func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
 // if the broker responds with a challenge, in which case the token is
 // rejected.
 func (b *Broker) sendClientMessage(message []byte) (bool, error) {
-
 	requestTime := time.Now()
 	correlationID := b.correlationID
 
@@ -1108,7 +1104,6 @@ func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
 	msg, err := scramClient.Step("")
 	if err != nil {
 		return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
-
 	}
 
 	for !scramClient.Done() {
@@ -1228,7 +1223,6 @@ func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, erro
 }
 
 func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
-
 	rb := &SaslAuthenticateRequest{initialResp}
 
 	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
@@ -1303,7 +1297,6 @@ func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
 	if b.brokerRequestLatency != nil {
 		b.brokerRequestLatency.Update(requestLatencyInMs)
 	}
-
 }
 
 func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
@@ -1322,7 +1315,6 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
 	if b.brokerRequestSize != nil {
 		b.brokerRequestSize.Update(requestSize)
 	}
-
 }
 
 func (b *Broker) registerMetrics() {

+ 0 - 10
broker_test.go

@@ -109,7 +109,6 @@ func TestSimpleBrokerCommunication(t *testing.T) {
 			t.Error(err)
 		}
 	}
-
 }
 
 var ErrTokenFailure = errors.New("Failure generating token")
@@ -131,7 +130,6 @@ func newTokenProvider(token *AccessToken, err error) *TokenProvider {
 }
 
 func TestSASLOAuthBearer(t *testing.T) {
-
 	testTable := []struct {
 		name                      string
 		authidentity              string
@@ -202,7 +200,6 @@ func TestSASLOAuthBearer(t *testing.T) {
 	}
 
 	for i, test := range testTable {
-
 		// mockBroker mocks underlying network logic and broker responses
 		mockBroker := NewMockBroker(t, 0)
 
@@ -327,7 +324,6 @@ func TestSASLSCRAMSHAXXX(t *testing.T) {
 	}
 
 	for i, test := range testTable {
-
 		// mockBroker mocks underlying network logic and broker responses
 		mockBroker := NewMockBroker(t, 0)
 		broker := NewBroker(mockBroker.Addr())
@@ -395,7 +391,6 @@ func TestSASLSCRAMSHAXXX(t *testing.T) {
 }
 
 func TestSASLPlainAuth(t *testing.T) {
-
 	testTable := []struct {
 		name             string
 		authidentity     string
@@ -427,7 +422,6 @@ func TestSASLPlainAuth(t *testing.T) {
 	}
 
 	for i, test := range testTable {
-
 		// mockBroker mocks underlying network logic and broker responses
 		mockBroker := NewMockBroker(t, 0)
 
@@ -570,7 +564,6 @@ func TestSASLReadTimeout(t *testing.T) {
 }
 
 func TestGSSAPIKerberosAuth_Authorize(t *testing.T) {
-
 	testTable := []struct {
 		name               string
 		error              error
@@ -686,11 +679,9 @@ func TestGSSAPIKerberosAuth_Authorize(t *testing.T) {
 
 		mockBroker.Close()
 	}
-
 }
 
 func TestBuildClientFirstMessage(t *testing.T) {
-
 	testTable := []struct {
 		name        string
 		token       *AccessToken
@@ -727,7 +718,6 @@ func TestBuildClientFirstMessage(t *testing.T) {
 	}
 
 	for i, test := range testTable {
-
 		actual, err := buildClientFirstMessage(test.token)
 
 		if !reflect.DeepEqual(test.expected, actual) {

+ 0 - 1
client.go

@@ -199,7 +199,6 @@ func (client *client) Brokers() []*Broker {
 func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
 	var err error
 	for broker := client.any(); broker != nil; broker = client.any() {
-
 		req := &InitProducerIDRequest{}
 
 		response, err := broker.InitProducerID(req)

+ 6 - 6
client_tls_test.go

@@ -86,7 +86,7 @@ func TestTLS(t *testing.T) {
 
 	// Keep server the same - it's the client that we're testing
 	serverTLSConfig := &tls.Config{
-		Certificates: []tls.Certificate{tls.Certificate{
+		Certificates: []tls.Certificate{{
 			Certificate: [][]byte{hostDer},
 			PrivateKey:  hostkey,
 		}},
@@ -103,7 +103,7 @@ func TestTLS(t *testing.T) {
 			Server:  serverTLSConfig,
 			Client: &tls.Config{
 				RootCAs: systemCerts,
-				Certificates: []tls.Certificate{tls.Certificate{
+				Certificates: []tls.Certificate{{
 					Certificate: [][]byte{clientDer},
 					PrivateKey:  clientkey,
 				}},
@@ -114,7 +114,7 @@ func TestTLS(t *testing.T) {
 			Server:  serverTLSConfig,
 			Client: &tls.Config{
 				RootCAs: pool,
-				Certificates: []tls.Certificate{tls.Certificate{
+				Certificates: []tls.Certificate{{
 					Certificate: [][]byte{clientDer},
 					PrivateKey:  hostkey,
 				}},
@@ -125,7 +125,7 @@ func TestTLS(t *testing.T) {
 			Server:  serverTLSConfig,
 			Client: &tls.Config{
 				RootCAs: pool,
-				Certificates: []tls.Certificate{tls.Certificate{
+				Certificates: []tls.Certificate{{
 					Certificate: [][]byte{hostDer},
 					PrivateKey:  clientkey,
 				}},
@@ -135,7 +135,7 @@ func TestTLS(t *testing.T) {
 			Succeed: false,
 			Server:  serverTLSConfig,
 			Client: &tls.Config{
-				Certificates: []tls.Certificate{tls.Certificate{
+				Certificates: []tls.Certificate{{
 					Certificate: [][]byte{clientDer},
 					PrivateKey:  clientkey,
 				}},
@@ -153,7 +153,7 @@ func TestTLS(t *testing.T) {
 			Server:  serverTLSConfig,
 			Client: &tls.Config{
 				RootCAs: pool,
-				Certificates: []tls.Certificate{tls.Certificate{
+				Certificates: []tls.Certificate{{
 					Certificate: [][]byte{clientDer},
 					PrivateKey:  clientkey,
 				}},

+ 0 - 1
config_test.go

@@ -194,7 +194,6 @@ func TestNetConfigValidates(t *testing.T) {
 				cfg.Net.SASL.GSSAPI.Username = "sarama"
 				cfg.Net.SASL.GSSAPI.Password = "sarama"
 				cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"
-
 			},
 			"Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used"},
 	}

+ 1 - 1
create_partitions_request_test.go

@@ -32,7 +32,7 @@ var (
 func TestCreatePartitionsRequest(t *testing.T) {
 	req := &CreatePartitionsRequest{
 		TopicPartitions: map[string]*TopicPartition{
-			"topic": &TopicPartition{
+			"topic": {
 				Count: 3,
 			},
 		},

+ 1 - 1
create_partitions_response_test.go

@@ -28,7 +28,7 @@ func TestCreatePartitionsResponse(t *testing.T) {
 	resp := &CreatePartitionsResponse{
 		ThrottleTime: 100 * time.Millisecond,
 		TopicPartitionErrors: map[string]*TopicPartitionError{
-			"topic": &TopicPartitionError{},
+			"topic": {},
 		},
 	}
 

+ 1 - 1
create_topics_request_test.go

@@ -31,7 +31,7 @@ func TestCreateTopicsRequest(t *testing.T) {
 				NumPartitions:     -1,
 				ReplicationFactor: -1,
 				ReplicaAssignment: map[int32][]int32{
-					0: []int32{0, 1, 2},
+					0: {0, 1, 2},
 				},
 				ConfigEntries: map[string]*string{
 					"retention.ms": &retention,

+ 1 - 1
create_topics_response_test.go

@@ -31,7 +31,7 @@ var (
 func TestCreateTopicsResponse(t *testing.T) {
 	resp := &CreateTopicsResponse{
 		TopicErrors: map[string]*TopicError{
-			"topic": &TopicError{
+			"topic": {
 				Err: ErrInvalidRequest,
 			},
 		},

+ 4 - 4
describe_configs_request_test.go

@@ -61,7 +61,7 @@ func TestDescribeConfigsRequestv0(t *testing.T) {
 	request = &DescribeConfigsRequest{
 		Version: 0,
 		Resources: []*ConfigResource{
-			&ConfigResource{
+			{
 				Type:        TopicResource,
 				Name:        "foo",
 				ConfigNames: configs,
@@ -74,12 +74,12 @@ func TestDescribeConfigsRequestv0(t *testing.T) {
 	request = &DescribeConfigsRequest{
 		Version: 0,
 		Resources: []*ConfigResource{
-			&ConfigResource{
+			{
 				Type:        TopicResource,
 				Name:        "foo",
 				ConfigNames: []string{"segment.ms", "retention.ms"},
 			},
-			&ConfigResource{
+			{
 				Type:        TopicResource,
 				Name:        "bar",
 				ConfigNames: []string{"segment.ms"},
@@ -91,7 +91,7 @@ func TestDescribeConfigsRequestv0(t *testing.T) {
 	request = &DescribeConfigsRequest{
 		Version: 0,
 		Resources: []*ConfigResource{
-			&ConfigResource{
+			{
 				Type: TopicResource,
 				Name: "foo",
 			},

+ 0 - 1
describe_configs_response.go

@@ -277,7 +277,6 @@ func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
 			}
 			r.Synonyms[i] = s
 		}
-
 	}
 	return nil
 }

+ 6 - 6
describe_configs_response_test.go

@@ -74,13 +74,13 @@ func TestDescribeConfigsResponsev0(t *testing.T) {
 
 	response = &DescribeConfigsResponse{
 		Version: 0, Resources: []*ResourceResponse{
-			&ResourceResponse{
+			{
 				ErrorCode: 0,
 				ErrorMsg:  "",
 				Type:      TopicResource,
 				Name:      "foo",
 				Configs: []*ConfigEntry{
-					&ConfigEntry{
+					{
 						Name:      "segment.ms",
 						Value:     "1000",
 						ReadOnly:  false,
@@ -108,13 +108,13 @@ func TestDescribeConfigsResponsev1(t *testing.T) {
 	response = &DescribeConfigsResponse{
 		Version: 1,
 		Resources: []*ResourceResponse{
-			&ResourceResponse{
+			{
 				ErrorCode: 0,
 				ErrorMsg:  "",
 				Type:      TopicResource,
 				Name:      "foo",
 				Configs: []*ConfigEntry{
-					&ConfigEntry{
+					{
 						Name:      "segment.ms",
 						Value:     "1000",
 						ReadOnly:  false,
@@ -143,13 +143,13 @@ func TestDescribeConfigsResponseWithSynonym(t *testing.T) {
 	response = &DescribeConfigsResponse{
 		Version: 1,
 		Resources: []*ResourceResponse{
-			&ResourceResponse{
+			{
 				ErrorCode: 0,
 				ErrorMsg:  "",
 				Type:      TopicResource,
 				Name:      "foo",
 				Configs: []*ConfigEntry{
-					&ConfigEntry{
+					{
 						Name:      "segment.ms",
 						Value:     "1000",
 						ReadOnly:  false,

+ 1 - 1
describe_log_dirs_request_test.go

@@ -22,7 +22,7 @@ func TestDescribeLogDirsRequest(t *testing.T) {
 	testRequest(t, "no topics", request, emptyDescribeLogDirsRequest)
 
 	request.DescribeTopics = []DescribeLogDirsRequestTopic{
-		DescribeLogDirsRequestTopic{
+		{
 			Topic:        "random",
 			PartitionIDs: []int32{25, 26},
 		},

+ 4 - 4
describe_log_dirs_response_test.go

@@ -42,20 +42,20 @@ func TestDescribeLogDirsResponse(t *testing.T) {
 	}
 
 	response.LogDirs = []DescribeLogDirsResponseDirMetadata{
-		DescribeLogDirsResponseDirMetadata{
+		{
 			ErrorCode: 0,
 			Path:      "/kafka",
 			Topics: []DescribeLogDirsResponseTopic{
-				DescribeLogDirsResponseTopic{
+				{
 					Topic: "random",
 					Partitions: []DescribeLogDirsResponsePartition{
-						DescribeLogDirsResponsePartition{
+						{
 							PartitionID: 25,
 							Size:        125,
 							OffsetLag:   0,
 							IsTemporary: false,
 						},
-						DescribeLogDirsResponsePartition{
+						{
 							PartitionID: 26,
 							Size:        100,
 							OffsetLag:   0,

+ 0 - 1
fetch_response.go

@@ -311,7 +311,6 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) {
 				return err
 			}
 		}
-
 	}
 	return nil
 }

+ 0 - 1
fetch_response_test.go

@@ -146,7 +146,6 @@ func TestEmptyFetchResponse(t *testing.T) {
 	if len(response.Blocks) != 0 {
 		t.Error("Decoding produced topic blocks where there were none.")
 	}
-
 }
 
 func TestOneMessageFetchResponse(t *testing.T) {

+ 0 - 1
functional_producer_test.go

@@ -173,7 +173,6 @@ func testProducingMessages(t *testing.T, config *Config) {
 				t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
 			}
 		}
-
 	}
 	safeClose(t, consumer)
 	safeClose(t, client)

+ 0 - 1
gssapi_kerberos.go

@@ -200,7 +200,6 @@ func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient K
 
 /* This does the handshake for authorization */
 func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
-
 	kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
 	if err != nil {
 		Logger.Printf("Kerberos client error: %s", err)

+ 0 - 1
message.go

@@ -85,7 +85,6 @@ func (m *Message) encode(pe packetEncoder) error {
 		payload = m.compressedCache
 		m.compressedCache = nil
 	} else if m.Value != nil {
-
 		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
 		if err != nil {
 			return err

+ 0 - 1
metadata_response.go

@@ -318,5 +318,4 @@ foundPartition:
 	pmatch.Isr = isr
 	pmatch.OfflineReplicas = offline
 	pmatch.Err = err
-
 }

+ 0 - 4
mockbroker.go

@@ -235,7 +235,6 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup)
 	var bytesWritten int
 	var bytesRead int
 	for {
-
 		buffer, err := b.readToBytes(conn)
 		if err != nil {
 			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
@@ -245,7 +244,6 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup)
 
 		bytesWritten = 0
 		if !b.isGSSAPI(buffer) {
-
 			req, br, err := decodeRequest(bytes.NewReader(buffer))
 			bytesRead = br
 			if err != nil {
@@ -294,7 +292,6 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup)
 				break
 			}
 			bytesWritten = len(resHeader) + len(encodedRes)
-
 		} else {
 			// GSSAPI is not part of the kafka protocol, but is supported for authentication purposes.
 			// Don't support history for this kind of request, as it is only used to test the GSSAPI authentication mechanism
@@ -317,7 +314,6 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup)
 			b.notifier(bytesRead, bytesWritten)
 		}
 		b.lock.Unlock()
-
 	}
 	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
 }

+ 0 - 1
offset_manager.go

@@ -280,7 +280,6 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest {
 			ConsumerID:              om.memberID,
 			ConsumerGroupGeneration: om.generation,
 		}
-
 	}
 
 	om.pomsLock.RLock()

+ 0 - 3
offset_manager_test.go

@@ -9,7 +9,6 @@ import (
 func initOffsetManagerWithBackoffFunc(t *testing.T, retention time.Duration,
 	backoffFunc func(retries, maxRetries int) time.Duration, config *Config) (om OffsetManager,
 	testClient Client, broker, coordinator *MockBroker) {
-
 	config.Metadata.Retry.Max = 1
 	if backoffFunc != nil {
 		config.Metadata.Retry.BackoffFunc = backoffFunc
@@ -56,7 +55,6 @@ func initOffsetManager(t *testing.T, retention time.Duration) (om OffsetManager,
 
 func initPartitionOffsetManager(t *testing.T, om OffsetManager,
 	coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager {
-
 	fetchResponse := new(OffsetFetchResponse)
 	fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{
 		Err:      ErrNoError,
@@ -122,7 +120,6 @@ func TestNewOffsetManagerOffsetsAutoCommit(t *testing.T) {
 	// Tests to validate configuration of `Consumer.Offsets.AutoCommit.Enable`
 	for _, tt := range offsetsautocommitTestTable {
 		t.Run(tt.name, func(t *testing.T) {
-
 			config := NewConfig()
 			if tt.set {
 				config.Consumer.Offsets.AutoCommit.Enable = tt.enable

+ 6 - 6
produce_set_test.go

@@ -228,15 +228,15 @@ func TestProduceSetV3RequestBuilding(t *testing.T) {
 		Key:       StringEncoder(TestMessage),
 		Value:     StringEncoder(TestMessage),
 		Headers: []RecordHeader{
-			RecordHeader{
+			{
 				Key:   []byte("header-1"),
 				Value: []byte("value-1"),
 			},
-			RecordHeader{
+			{
 				Key:   []byte("header-2"),
 				Value: []byte("value-2"),
 			},
-			RecordHeader{
+			{
 				Key:   []byte("header-3"),
 				Value: []byte("value-3"),
 			},
@@ -306,15 +306,15 @@ func TestProduceSetIdempotentRequestBuilding(t *testing.T) {
 		Key:       StringEncoder(TestMessage),
 		Value:     StringEncoder(TestMessage),
 		Headers: []RecordHeader{
-			RecordHeader{
+			{
 				Key:   []byte("header-1"),
 				Value: []byte("value-1"),
 			},
-			RecordHeader{
+			{
 				Key:   []byte("header-2"),
 				Value: []byte("value-2"),
 			},
-			RecordHeader{
+			{
 				Key:   []byte("header-3"),
 				Value: []byte("value-3"),
 			},

+ 0 - 1
sasl_authenticate_response_test.go

@@ -11,7 +11,6 @@ var (
 )
 
 func TestSaslAuthenticateResponse(t *testing.T) {
-
 	response := new(SaslAuthenticateResponse)
 	response.Err = ErrSASLAuthenticationFailed
 	msg := "err"

+ 1 - 1
txn_offset_commit_request_test.go

@@ -24,7 +24,7 @@ func TestTxnOffsetCommitRequest(t *testing.T) {
 		ProducerID:      8000,
 		ProducerEpoch:   1,
 		Topics: map[string][]*PartitionOffsetMetadata{
-			"topic": []*PartitionOffsetMetadata{{
+			"topic": {{
 				Offset:    123,
 				Partition: 2,
 			}},

+ 1 - 1
txn_offset_commit_response_test.go

@@ -20,7 +20,7 @@ func TestTxnOffsetCommitResponse(t *testing.T) {
 	resp := &TxnOffsetCommitResponse{
 		ThrottleTime: 100 * time.Millisecond,
 		Topics: map[string][]*PartitionError{
-			"topic": []*PartitionError{{
+			"topic": {{
 				Partition: 2,
 				Err:       ErrInvalidProducerEpoch,
 			}},

Some files were not shown because too many files changed in this diff