
Merge pull request #6649 from fanminshi/discovery_max_wait

discovery: add upper limit for waiting on a retry
fanmin shi 9 years ago
parent commit 77d6ecbc5f
1 changed file with 10 additions and 4 deletions

+ 10 - 4
discovery/discovery.go

@@ -52,7 +52,8 @@ var (
 
 var (
 	// Number of retries discovery will attempt before giving up and erroring out.
-	nRetries = uint(math.MaxUint32)
+	nRetries             = uint(math.MaxUint32)
+	maxExpoentialRetries = uint(8)
 )
 
 // JoinCluster will connect to the discovery service at the given url, and
@@ -268,9 +269,14 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
 
 func (d *discovery) logAndBackoffForRetry(step string) {
 	d.retries++
-	retryTime := time.Second * (0x1 << d.retries)
-	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTime)
-	d.clock.Sleep(retryTime)
+	// Stop growing the backoff exponentially once d.retries exceeds maxExpoentialRetries; use a constant backoff from then on.
+	retries := d.retries
+	if retries > maxExpoentialRetries {
+		retries = maxExpoentialRetries
+	}
+	retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
+	d.clock.Sleep(retryTimeInSecond)
 }
 
 func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
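
To illustrate the effect of the change, here is a minimal standalone sketch of the capped backoff. It is not the committed code: backoffFor and the corrected spelling maxExponentialRetries are made up for this example, whereas the commit keeps the delay calculation inside logAndBackoffForRetry and uses the package-level maxExpoentialRetries. Only the capping logic mirrors the diff above: the wait doubles per retry and stops growing after the eighth attempt.

package main

import (
	"fmt"
	"time"
)

// maxExponentialRetries is a stand-in for the package-level maxExpoentialRetries
// added by this commit; the value 8 matches the diff.
const maxExponentialRetries = 8

// backoffFor is a hypothetical helper that reproduces the capped calculation from
// logAndBackoffForRetry: double the wait on each retry, but stop doubling once the
// retry count exceeds maxExponentialRetries, so every later retry waits 1<<8 = 256s.
func backoffFor(retries uint) time.Duration {
	if retries > maxExponentialRetries {
		retries = maxExponentialRetries
	}
	return time.Duration(0x1<<retries) * time.Second
}

func main() {
	for r := uint(1); r <= 10; r++ {
		fmt.Printf("retry %2d -> wait %v\n", r, backoffFor(r))
	}
	// retries 9 and 10 wait 4m16s (256s), the same as retry 8.
}

With maxExpoentialRetries set to 8, the longest wait between discovery attempts is 1<<8 = 256 seconds; before this change the wait doubled on every retry with no upper bound (and, since nRetries is math.MaxUint32, the shift would eventually overflow).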