2637309949@qq.com committed 5 years ago
Commit 6754b55fc6
74 files changed, 22335 additions and 0 deletions
  1. .gitignore (+2 -0)
  2. .golangci.yml (+15 -0)
  3. .travis.yml (+24 -0)
  4. CHANGELOG.md (+37 -0)
  5. LICENSE (+25 -0)
  6. Makefile (+20 -0)
  7. README.md (+125 -0)
  8. bench_test.go (+338 -0)
  9. cluster.go (+1634 -0)
  10. cluster_commands.go (+22 -0)
  11. cluster_test.go (+1051 -0)
  12. command.go (+2027 -0)
  13. command_test.go (+96 -0)
  14. commands.go (+2628 -0)
  15. commands_test.go (+4042 -0)
  16. doc.go (+4 -0)
  17. error.go (+93 -0)
  18. example_instrumentation_test.go (+80 -0)
  19. example_test.go (+501 -0)
  20. export_test.go (+82 -0)
  21. go.mod (+16 -0)
  22. go.sum (+49 -0)
  23. internal/consistenthash/consistenthash.go (+81 -0)
  24. internal/consistenthash/consistenthash_test.go (+110 -0)
  25. internal/hashtag/hashtag.go (+77 -0)
  26. internal/hashtag/hashtag_test.go (+74 -0)
  27. internal/internal.go (+24 -0)
  28. internal/internal_test.go (+18 -0)
  29. internal/log.go (+8 -0)
  30. internal/once.go (+60 -0)
  31. internal/pool/bench_test.go (+94 -0)
  32. internal/pool/conn.go (+118 -0)
  33. internal/pool/export_test.go (+7 -0)
  34. internal/pool/main_test.go (+36 -0)
  35. internal/pool/pool.go (+515 -0)
  36. internal/pool/pool_single.go (+208 -0)
  37. internal/pool/pool_sticky.go (+112 -0)
  38. internal/pool/pool_test.go (+421 -0)
  39. internal/proto/proto_test.go (+13 -0)
  40. internal/proto/reader.go (+312 -0)
  41. internal/proto/reader_test.go (+56 -0)
  42. internal/proto/scan.go (+166 -0)
  43. internal/proto/scan_test.go (+48 -0)
  44. internal/proto/write_buffer_test.go (+93 -0)
  45. internal/proto/writer.go (+165 -0)
  46. internal/util.go (+56 -0)
  47. internal/util/safe.go (+11 -0)
  48. internal/util/strconv.go (+19 -0)
  49. internal/util/unsafe.go (+22 -0)
  50. internal_test.go (+65 -0)
  51. iterator.go (+75 -0)
  52. iterator_test.go (+136 -0)
  53. main_test.go (+372 -0)
  54. options.go (+243 -0)
  55. options_test.go (+119 -0)
  56. pipeline.go (+142 -0)
  57. pipeline_test.go (+87 -0)
  58. pool_test.go (+150 -0)
  59. pubsub.go (+593 -0)
  60. pubsub_test.go (+446 -0)
  61. race_test.go (+355 -0)
  62. redis.go (+742 -0)
  63. redis_test.go (+391 -0)
  64. result.go (+172 -0)
  65. ring.go (+722 -0)
  66. ring_test.go (+485 -0)
  67. script.go (+62 -0)
  68. sentinel.go (+503 -0)
  69. sentinel_test.go (+88 -0)
  70. testdata/redis.conf (+10 -0)
  71. tx.go (+156 -0)
  72. tx_test.go (+151 -0)
  73. universal.go (+194 -0)
  74. universal_test.go (+41 -0)

+ 2 - 0
.gitignore

@@ -0,0 +1,2 @@
+*.rdb
+testdata/*/

+ 15 - 0
.golangci.yml

@@ -0,0 +1,15 @@
+run:
+  concurrency: 8
+  deadline: 5m
+  tests: false
+linters:
+  enable-all: true
+  disable:
+    - funlen
+    - gochecknoglobals
+    - gocognit
+    - goconst
+    - godox
+    - gosec
+    - maligned
+    - wsl

+ 24 - 0
.travis.yml

@@ -0,0 +1,24 @@
+dist: xenial
+sudo: false
+language: go
+
+services:
+  - redis-server
+
+go:
+  - 1.11.x
+  - 1.12.x
+  - 1.13.x
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+env:
+  - GO111MODULE=on
+
+go_import_path: github.com/go-redis/redis
+
+before_install:
+  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0

+ 37 - 0
CHANGELOG.md

@@ -0,0 +1,37 @@
+# Changelog
+
+## v7 WIP
+
+- Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a transactional pipeline.
+- WrapProcess is replaced with the more convenient AddHook, which has access to context.Context.
+- WithContext can no longer be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions, which sends `*Subscription` in addition to `*Message`, to allow detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. The `rdb.Get("foo").Time()` helper is added to parse it.
+- `SetLimiter` is removed; use `Options.Limiter` instead.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## v6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is reworked to not use ReceiveTimeout, so it does not lose data when a timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots`, which allows building a cluster of normal Redis servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup

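The v7 entry above replaces WrapProcess with AddHook. As a rough illustration (not part of the committed changelog), a hook might look like the sketch below; the `loggingHook` type and log messages are hypothetical, and the method set follows the v7 `redis.Hook` interface introduced by this commit:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/go-redis/redis/v7"
)

// startKey carries the command start time through the context.
type startKey struct{}

// loggingHook is a hypothetical hook that logs how long each command takes.
type loggingHook struct{}

func (loggingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
	return context.WithValue(ctx, startKey{}, time.Now()), nil
}

func (loggingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
	if start, ok := ctx.Value(startKey{}).(time.Time); ok {
		log.Printf("%s took %s", cmd.Name(), time.Since(start))
	}
	return nil
}

func (loggingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
	return ctx, nil
}

func (loggingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
	log.Printf("pipeline of %d commands finished", len(cmds))
	return nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	rdb.AddHook(loggingHook{})
	_ = rdb.Ping().Err()
}
```

The commit's own instrumentation examples live in example_instrumentation_test.go from the file list above.
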
+ 25 - 0
LICENSE

@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 20 - 0
Makefile

@@ -0,0 +1,20 @@
+all: testdeps
+	go test ./...
+	go test ./... -short -race
+	go test ./... -run=NONE -bench=. -benchmem
+	env GOOS=linux GOARCH=386 go test ./...
+	golangci-lint run
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+	go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+	mkdir -p $@
+	wget -qO- http://download.redis.io/releases/redis-5.0.7.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+	cd $< && make all

+ 125 - 0
README.md

@@ -0,0 +1,125 @@
+# Redis client for Golang
+
+[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+[![GoDoc](https://godoc.org/github.com/go-redis/redis?status.svg)](https://godoc.org/github.com/go-redis/redis)
+[![Airbrake](https://img.shields.io/badge/kudos-airbrake.io-orange.svg)](https://airbrake.io)
+
+Supports:
+
+- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
+- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
+- [Transactions](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
+- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
+- [Scripting](https://godoc.org/github.com/go-redis/redis#Script).
+- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
+- [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient).
+- [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient).
+- [Cluster of Redis Servers](https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup) without using cluster mode and Redis Sentinel.
+- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
+- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
+- [Cache friendly](https://github.com/go-redis/cache).
+- [Rate limiting](https://github.com/go-redis/redis_rate).
+- [Distributed Locks](https://github.com/bsm/redislock).
+
+API docs: https://godoc.org/github.com/go-redis/redis.
+Examples: https://godoc.org/github.com/go-redis/redis#pkg-examples.
+
+## Installation
+
+go-redis requires a Go version with [Modules](https://github.com/golang/go/wiki/Modules) support and uses import versioning. So please make sure to initialize a Go module before installing go-redis:
+
+``` shell
+go mod init github.com/my/repo
+go get github.com/go-redis/redis/v7
+```
+
+Import:
+
+``` go
+import "github.com/go-redis/redis/v7"
+```
+
+## Quickstart
+
+``` go
+func ExampleNewClient() {
+	client := redis.NewClient(&redis.Options{
+		Addr:     "localhost:6379",
+		Password: "", // no password set
+		DB:       0,  // use default DB
+	})
+
+	pong, err := client.Ping().Result()
+	fmt.Println(pong, err)
+	// Output: PONG <nil>
+}
+
+func ExampleClient() {
+	client := redis.NewClient(&redis.Options{
+		Addr:     "localhost:6379",
+		Password: "", // no password set
+		DB:       0,  // use default DB
+	})
+	err := client.Set("key", "value", 0).Err()
+	if err != nil {
+		panic(err)
+	}
+
+	val, err := client.Get("key").Result()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("key", val)
+
+	val2, err := client.Get("key2").Result()
+	if err == redis.Nil {
+		fmt.Println("key2 does not exist")
+	} else if err != nil {
+		panic(err)
+	} else {
+		fmt.Println("key2", val2)
+	}
+	// Output: key value
+	// key2 does not exist
+}
+```
+
+## Howto
+
+Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea of how to use this package.
+
+## Look and feel
+
+Some corner cases:
+
+``` go
+// SET key value EX 10 NX
+set, err := client.SetNX("key", "value", 10*time.Second).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := client.Sort("list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+	Min: "-inf",
+	Max: "+inf",
+	Offset: 0,
+	Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := client.Do("set", "key", "value").Result()
+```
+
+## See also
+
+- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
+- [Golang msgpack](https://github.com/vmihailenco/msgpack)
+- [Golang message task queue](https://github.com/vmihailenco/taskq)

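The README links to Transactions and TxPipeline but its corner cases only show plain pipelining. As a supplementary sketch (not part of the committed README, and assuming the same `fmt`, `time`, and `redis` imports as the Quickstart), a transactional pipeline could be used like this:

```go
func txPipelineSketch() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Commands are queued locally and sent wrapped in MULTI/EXEC on Exec.
	pipe := client.TxPipeline()
	incr := pipe.Incr("tx_counter")
	pipe.Expire("tx_counter", time.Hour)

	_, err := pipe.Exec()
	fmt.Println(incr.Val(), err)
}
```
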
+ 338 - 0
bench_test.go

@@ -0,0 +1,338 @@
+package redis_test
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+)
+
+func benchmarkRedisClient(poolSize int) *redis.Client {
+	client := redis.NewClient(&redis.Options{
+		Addr:         ":6379",
+		DialTimeout:  time.Second,
+		ReadTimeout:  time.Second,
+		WriteTimeout: time.Second,
+		PoolSize:     poolSize,
+	})
+	if err := client.FlushDB().Err(); err != nil {
+		panic(err)
+	}
+	return client
+}
+
+func BenchmarkRedisPing(b *testing.B) {
+	client := benchmarkRedisClient(10)
+	defer client.Close()
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if err := client.Ping().Err(); err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+func BenchmarkRedisGetNil(b *testing.B) {
+	client := benchmarkRedisClient(10)
+	defer client.Close()
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if err := client.Get("key").Err(); err != redis.Nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+type setStringBenchmark struct {
+	poolSize  int
+	valueSize int
+}
+
+func (bm setStringBenchmark) String() string {
+	return fmt.Sprintf("pool=%d value=%d", bm.poolSize, bm.valueSize)
+}
+
+func BenchmarkRedisSetString(b *testing.B) {
+	benchmarks := []setStringBenchmark{
+		{10, 64},
+		{10, 1024},
+		{10, 64 * 1024},
+		{10, 1024 * 1024},
+		{10, 10 * 1024 * 1024},
+
+		{100, 64},
+		{100, 1024},
+		{100, 64 * 1024},
+		{100, 1024 * 1024},
+		{100, 10 * 1024 * 1024},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.String(), func(b *testing.B) {
+			client := benchmarkRedisClient(bm.poolSize)
+			defer client.Close()
+
+			value := strings.Repeat("1", bm.valueSize)
+
+			b.ResetTimer()
+
+			b.RunParallel(func(pb *testing.PB) {
+				for pb.Next() {
+					err := client.Set("key", value, 0).Err()
+					if err != nil {
+						b.Fatal(err)
+					}
+				}
+			})
+		})
+	}
+}
+
+func BenchmarkRedisSetGetBytes(b *testing.B) {
+	client := benchmarkRedisClient(10)
+	defer client.Close()
+
+	value := bytes.Repeat([]byte{'1'}, 10000)
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if err := client.Set("key", value, 0).Err(); err != nil {
+				b.Fatal(err)
+			}
+
+			got, err := client.Get("key").Bytes()
+			if err != nil {
+				b.Fatal(err)
+			}
+			if !bytes.Equal(got, value) {
+				b.Fatalf("got != value")
+			}
+		}
+	})
+}
+
+func BenchmarkRedisMGet(b *testing.B) {
+	client := benchmarkRedisClient(10)
+	defer client.Close()
+
+	if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil {
+		b.Fatal(err)
+	}
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if err := client.MGet("key1", "key2").Err(); err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+func BenchmarkSetExpire(b *testing.B) {
+	client := benchmarkRedisClient(10)
+	defer client.Close()
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if err := client.Set("key", "hello", 0).Err(); err != nil {
+				b.Fatal(err)
+			}
+			if err := client.Expire("key", time.Second).Err(); err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+func BenchmarkPipeline(b *testing.B) {
+	client := benchmarkRedisClient(10)
+	defer client.Close()
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			_, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+				pipe.Set("key", "hello", 0)
+				pipe.Expire("key", time.Second)
+				return nil
+			})
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+func BenchmarkZAdd(b *testing.B) {
+	client := benchmarkRedisClient(10)
+	defer client.Close()
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			err := client.ZAdd("key", &redis.Z{
+				Score:  float64(1),
+				Member: "hello",
+			}).Err()
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+var clientSink *redis.Client
+
+func BenchmarkWithContext(b *testing.B) {
+	rdb := benchmarkRedisClient(10)
+	defer rdb.Close()
+
+	ctx := context.Background()
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		clientSink = rdb.WithContext(ctx)
+	}
+}
+
+var ringSink *redis.Ring
+
+func BenchmarkRingWithContext(b *testing.B) {
+	rdb := redis.NewRing(&redis.RingOptions{})
+	defer rdb.Close()
+
+	ctx := context.Background()
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		ringSink = rdb.WithContext(ctx)
+	}
+}
+
+//------------------------------------------------------------------------------
+
+func newClusterScenario() *clusterScenario {
+	return &clusterScenario{
+		ports:     []string{"8220", "8221", "8222", "8223", "8224", "8225"},
+		nodeIDs:   make([]string, 6),
+		processes: make(map[string]*redisProcess, 6),
+		clients:   make(map[string]*redis.Client, 6),
+	}
+}
+
+func BenchmarkClusterPing(b *testing.B) {
+	if testing.Short() {
+		b.Skip("skipping in short mode")
+	}
+
+	cluster := newClusterScenario()
+	if err := startCluster(cluster); err != nil {
+		b.Fatal(err)
+	}
+	defer stopCluster(cluster)
+
+	client := cluster.clusterClient(redisClusterOptions())
+	defer client.Close()
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			err := client.Ping().Err()
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+func BenchmarkClusterSetString(b *testing.B) {
+	if testing.Short() {
+		b.Skip("skipping in short mode")
+	}
+
+	cluster := newClusterScenario()
+	if err := startCluster(cluster); err != nil {
+		b.Fatal(err)
+	}
+	defer stopCluster(cluster)
+
+	client := cluster.clusterClient(redisClusterOptions())
+	defer client.Close()
+
+	value := string(bytes.Repeat([]byte{'1'}, 10000))
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			err := client.Set("key", value, 0).Err()
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
+
+func BenchmarkClusterReloadState(b *testing.B) {
+	if testing.Short() {
+		b.Skip("skipping in short mode")
+	}
+
+	cluster := newClusterScenario()
+	if err := startCluster(cluster); err != nil {
+		b.Fatal(err)
+	}
+	defer stopCluster(cluster)
+
+	client := cluster.clusterClient(redisClusterOptions())
+	defer client.Close()
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		err := client.ReloadState()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+var clusterSink *redis.ClusterClient
+
+func BenchmarkClusterWithContext(b *testing.B) {
+	rdb := redis.NewClusterClient(&redis.ClusterOptions{})
+	defer rdb.Close()
+
+	ctx := context.Background()
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		clusterSink = rdb.WithContext(ctx)
+	}
+}

+ 1634 - 0
cluster.go

@@ -0,0 +1,1634 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"math"
+	"math/rand"
+	"net"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/internal"
+	"github.com/go-redis/redis/internal/hashtag"
+	"github.com/go-redis/redis/internal/pool"
+	"github.com/go-redis/redis/internal/proto"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+	// A seed list of host:port addresses of cluster nodes.
+	Addrs []string
+
+	// The maximum number of retries before giving up. Command is retried
+	// on network errors and MOVED/ASK redirects.
+	// Default is 8 retries.
+	MaxRedirects int
+
+	// Enables read-only commands on slave nodes.
+	ReadOnly bool
+	// Allows routing read-only commands to the closest master or slave node.
+	// It automatically enables ReadOnly.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+	// It automatically enables ReadOnly.
+	RouteRandomly bool
+
+	// Optional function that returns cluster slots information.
+	// It is useful for manually creating a cluster of standalone Redis servers
+	// and load-balancing read/write operations between master and slaves.
+	// It can use a service like ZooKeeper to maintain configuration information
+	// and Cluster.ReloadState to manually trigger state reloading.
+	ClusterSlots func() ([]ClusterSlot, error)
+
+	// Optional hook that is called when a new node is created.
+	OnNewNode func(*Client)
+
+	// Following options are copied from Options struct.
+
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	OnConnect func(*Conn) error
+
+	Password string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolSize applies per cluster node and not for the whole cluster.
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+	if opt.MaxRedirects == -1 {
+		opt.MaxRedirects = 0
+	} else if opt.MaxRedirects == 0 {
+		opt.MaxRedirects = 8
+	}
+
+	if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+		opt.ReadOnly = true
+	}
+
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 5 * runtime.NumCPU()
+	}
+
+	switch opt.ReadTimeout {
+	case -1:
+		opt.ReadTimeout = 0
+	case 0:
+		opt.ReadTimeout = 3 * time.Second
+	}
+	switch opt.WriteTimeout {
+	case -1:
+		opt.WriteTimeout = 0
+	case 0:
+		opt.WriteTimeout = opt.ReadTimeout
+	}
+
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+	const disableIdleCheck = -1
+
+	return &Options{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+		Password:        opt.Password,
+		readOnly:        opt.ReadOnly,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: disableIdleCheck,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+	Client *Client
+
+	latency    uint32 // atomic
+	generation uint32 // atomic
+	failing    uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+	opt := clOpt.clientOptions()
+	opt.Addr = addr
+	node := clusterNode{
+		Client: NewClient(opt),
+	}
+
+	node.latency = math.MaxUint32
+	if clOpt.RouteByLatency {
+		go node.updateLatency()
+	}
+
+	if clOpt.OnNewNode != nil {
+		clOpt.OnNewNode(node.Client)
+	}
+
+	return &node
+}
+
+func (n *clusterNode) String() string {
+	return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+	return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+	const probes = 10
+
+	var latency uint32
+	for i := 0; i < probes; i++ {
+		start := time.Now()
+		n.Client.Ping()
+		probe := uint32(time.Since(start) / time.Microsecond)
+		latency = (latency + probe) / 2
+	}
+	atomic.StoreUint32(&n.latency, latency)
+}
+
+func (n *clusterNode) Latency() time.Duration {
+	latency := atomic.LoadUint32(&n.latency)
+	return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+	atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+	const timeout = 15 // 15 seconds
+
+	failing := atomic.LoadUint32(&n.failing)
+	if failing == 0 {
+		return false
+	}
+	if time.Now().Unix()-int64(failing) < timeout {
+		return true
+	}
+	atomic.StoreUint32(&n.failing, 0)
+	return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+	return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+	for {
+		v := atomic.LoadUint32(&n.generation)
+		if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+			break
+		}
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+	opt *ClusterOptions
+
+	mu           sync.RWMutex
+	allAddrs     []string
+	allNodes     map[string]*clusterNode
+	clusterAddrs []string
+	closed       bool
+
+	_generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+	return &clusterNodes{
+		opt: opt,
+
+		allAddrs: opt.Addrs,
+		allNodes: make(map[string]*clusterNode),
+	}
+}
+
+func (c *clusterNodes) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, node := range c.allNodes {
+		if err := node.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	c.allNodes = nil
+	c.clusterAddrs = nil
+
+	return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+	var addrs []string
+	c.mu.RLock()
+	closed := c.closed
+	if !closed {
+		if len(c.clusterAddrs) > 0 {
+			addrs = c.clusterAddrs
+		} else {
+			addrs = c.allAddrs
+		}
+	}
+	c.mu.RUnlock()
+
+	if closed {
+		return nil, pool.ErrClosed
+	}
+	if len(addrs) == 0 {
+		return nil, errClusterNoNodes
+	}
+	return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+	return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+	//nolint:prealloc
+	var collected []*clusterNode
+	c.mu.Lock()
+	for addr, node := range c.allNodes {
+		if node.Generation() >= generation {
+			continue
+		}
+
+		c.clusterAddrs = remove(c.clusterAddrs, addr)
+		delete(c.allNodes, addr)
+		collected = append(collected, node)
+	}
+	c.mu.Unlock()
+
+	for _, node := range collected {
+		_ = node.Client.Close()
+	}
+}
+
+func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+	node, err := c.get(addr)
+	if err != nil {
+		return nil, err
+	}
+	if node != nil {
+		return node, nil
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	node, ok := c.allNodes[addr]
+	if ok {
+		return node, err
+	}
+
+	node = newClusterNode(c.opt, addr)
+
+	c.allAddrs = appendIfNotExists(c.allAddrs, addr)
+	c.clusterAddrs = append(c.clusterAddrs, addr)
+	c.allNodes[addr] = node
+
+	return node, err
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+	var node *clusterNode
+	var err error
+	c.mu.RLock()
+	if c.closed {
+		err = pool.ErrClosed
+	} else {
+		node = c.allNodes[addr]
+	}
+	c.mu.RUnlock()
+	return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	cp := make([]*clusterNode, 0, len(c.allNodes))
+	for _, node := range c.allNodes {
+		cp = append(cp, node)
+	}
+	return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+	addrs, err := c.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	n := rand.Intn(len(addrs))
+	return c.Get(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+	start, end int
+	nodes      []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+	return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+	return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+	nodes   *clusterNodes
+	Masters []*clusterNode
+	Slaves  []*clusterNode
+
+	slots []*clusterSlot
+
+	generation uint32
+	createdAt  time.Time
+}
+
+func newClusterState(
+	nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+	c := clusterState{
+		nodes: nodes,
+
+		slots: make([]*clusterSlot, 0, len(slots)),
+
+		generation: nodes.NextGeneration(),
+		createdAt:  time.Now(),
+	}
+
+	originHost, _, _ := net.SplitHostPort(origin)
+	isLoopbackOrigin := isLoopback(originHost)
+
+	for _, slot := range slots {
+		var nodes []*clusterNode
+		for i, slotNode := range slot.Nodes {
+			addr := slotNode.Addr
+			if !isLoopbackOrigin {
+				addr = replaceLoopbackHost(addr, originHost)
+			}
+
+			node, err := c.nodes.Get(addr)
+			if err != nil {
+				return nil, err
+			}
+
+			node.SetGeneration(c.generation)
+			nodes = append(nodes, node)
+
+			if i == 0 {
+				c.Masters = appendUniqueNode(c.Masters, node)
+			} else {
+				c.Slaves = appendUniqueNode(c.Slaves, node)
+			}
+		}
+
+		c.slots = append(c.slots, &clusterSlot{
+			start: slot.Start,
+			end:   slot.End,
+			nodes: nodes,
+		})
+	}
+
+	sort.Sort(clusterSlotSlice(c.slots))
+
+	time.AfterFunc(time.Minute, func() {
+		nodes.GC(c.generation)
+	})
+
+	return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+	nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+	if err != nil {
+		return nodeAddr
+	}
+
+	nodeIP := net.ParseIP(nodeHost)
+	if nodeIP == nil {
+		return nodeAddr
+	}
+
+	if !nodeIP.IsLoopback() {
+		return nodeAddr
+	}
+
+	// Use the origin host (which is not a loopback address) with the node's port.
+	return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return true
+	}
+	return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) > 0 {
+		return nodes[0], nil
+	}
+	return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	switch len(nodes) {
+	case 0:
+		return c.nodes.Random()
+	case 1:
+		return nodes[0], nil
+	case 2:
+		if slave := nodes[1]; !slave.Failing() {
+			return slave, nil
+		}
+		return nodes[0], nil
+	default:
+		var slave *clusterNode
+		for i := 0; i < 10; i++ {
+			n := rand.Intn(len(nodes)-1) + 1
+			slave = nodes[n]
+			if !slave.Failing() {
+				return slave, nil
+			}
+		}
+
+		// All slaves are loading - use master.
+		return nodes[0], nil
+	}
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+	const threshold = time.Millisecond
+
+	nodes := c.slotNodes(slot)
+	if len(nodes) == 0 {
+		return c.nodes.Random()
+	}
+
+	var node *clusterNode
+	for _, n := range nodes {
+		if n.Failing() {
+			continue
+		}
+		if node == nil || node.Latency()-n.Latency() > threshold {
+			node = n
+		}
+	}
+	return node, nil
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) == 0 {
+		return c.nodes.Random()
+	}
+	n := rand.Intn(len(nodes))
+	return nodes[n], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+	i := sort.Search(len(c.slots), func(i int) bool {
+		return c.slots[i].end >= slot
+	})
+	if i >= len(c.slots) {
+		return nil
+	}
+	x := c.slots[i]
+	if slot >= x.start && slot <= x.end {
+		return x.nodes
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+	load func() (*clusterState, error)
+
+	state     atomic.Value
+	reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder {
+	return &clusterStateHolder{
+		load: fn,
+	}
+}
+
+func (c *clusterStateHolder) Reload() (*clusterState, error) {
+	state, err := c.load()
+	if err != nil {
+		return nil, err
+	}
+	c.state.Store(state)
+	return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+	if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+		return
+	}
+	go func() {
+		defer atomic.StoreUint32(&c.reloading, 0)
+
+		_, err := c.Reload()
+		if err != nil {
+			return
+		}
+		time.Sleep(100 * time.Millisecond)
+	}()
+}
+
+func (c *clusterStateHolder) Get() (*clusterState, error) {
+	v := c.state.Load()
+	if v != nil {
+		state := v.(*clusterState)
+		if time.Since(state.createdAt) > time.Minute {
+			c.LazyReload()
+		}
+		return state, nil
+	}
+	return c.Reload()
+}
+
+func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
+	state, err := c.Reload()
+	if err == nil {
+		return state, nil
+	}
+	return c.Get()
+}
+
+//------------------------------------------------------------------------------
+
+type clusterClient struct {
+	opt           *ClusterOptions
+	nodes         *clusterNodes
+	state         *clusterStateHolder //nolint:structcheck
+	cmdsInfoCache *cmdsInfoCache      //nolint:structcheck
+}
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+	*clusterClient
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+	opt.init()
+
+	c := &ClusterClient{
+		clusterClient: &clusterClient{
+			opt:   opt,
+			nodes: newClusterNodes(opt),
+		},
+		ctx: context.Background(),
+	}
+	c.state = newClusterStateHolder(c.loadState)
+	c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+	c.cmdable = c.Process
+
+	if opt.IdleCheckFrequency > 0 {
+		go c.reaper(opt.IdleCheckFrequency)
+	}
+
+	return c
+}
+
+func (c *ClusterClient) Context() context.Context {
+	return c.ctx
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.Lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+	return c.opt
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState() error {
+	_, err := c.state.Reload()
+	return err
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+	return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(args ...interface{}) *Cmd {
+	return c.DoContext(c.ctx, args...)
+}
+
+func (c *ClusterClient) DoContext(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(args...)
+	_ = c.ProcessContext(ctx, cmd)
+	return cmd
+}
+
+func (c *ClusterClient) Process(cmd Cmder) error {
+	return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *ClusterClient) ProcessContext(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.process)
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+	err := c._process(ctx, cmd)
+	if err != nil {
+		cmd.SetErr(err)
+		return err
+	}
+	return nil
+}
+
+func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
+	cmdInfo := c.cmdInfo(cmd.Name())
+	slot := c.cmdSlot(cmd)
+
+	var node *clusterNode
+	var ask bool
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		if node == nil {
+			var err error
+			node, err = c.cmdNode(cmdInfo, slot)
+			if err != nil {
+				return err
+			}
+		}
+
+		if ask {
+			pipe := node.Client.Pipeline()
+			_ = pipe.Process(NewCmd("ASKING"))
+			_ = pipe.Process(cmd)
+			_, lastErr = pipe.ExecContext(ctx)
+			_ = pipe.Close()
+			ask = false
+		} else {
+			lastErr = node.Client._process(ctx, cmd)
+		}
+
+		// If there is no error - we are done.
+		if lastErr == nil {
+			return nil
+		}
+		if lastErr != Nil {
+			c.state.LazyReload()
+		}
+		if lastErr == pool.ErrClosed || isReadOnlyError(lastErr) {
+			node = nil
+			continue
+		}
+
+		// If slave is loading - pick another node.
+		if c.opt.ReadOnly && isLoadingError(lastErr) {
+			node.MarkAsFailing()
+			node = nil
+			continue
+		}
+
+		var moved bool
+		var addr string
+		moved, ask, addr = isMovedError(lastErr)
+		if moved || ask {
+			var err error
+			node, err = c.nodes.Get(addr)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if isRetryableError(lastErr, cmd.readTimeout() == nil) {
+			// First retry the same node.
+			if attempt == 0 {
+				continue
+			}
+
+			// Second try another node.
+			node.MarkAsFailing()
+			node = nil
+			continue
+		}
+
+		return lastErr
+	}
+	return lastErr
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
+	state, err := c.state.ReloadOrGet()
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	for _, master := range state.Masters {
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(master)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
+	state, err := c.state.ReloadOrGet()
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	for _, slave := range state.Slaves {
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(slave)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// ForEachNode concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
+	state, err := c.state.ReloadOrGet()
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	worker := func(node *clusterNode) {
+		defer wg.Done()
+		err := fn(node.Client)
+		if err != nil {
+			select {
+			case errCh <- err:
+			default:
+			}
+		}
+	}
+
+	for _, node := range state.Masters {
+		wg.Add(1)
+		go worker(node)
+	}
+	for _, node := range state.Slaves {
+		wg.Add(1)
+		go worker(node)
+	}
+
+	wg.Wait()
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+	var acc PoolStats
+
+	state, _ := c.state.Get()
+	if state == nil {
+		return &acc
+	}
+
+	for _, node := range state.Masters {
+		s := node.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+		acc.StaleConns += s.StaleConns
+	}
+
+	for _, node := range state.Slaves {
+		s := node.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+		acc.StaleConns += s.StaleConns
+	}
+
+	return &acc
+}
+
+func (c *ClusterClient) loadState() (*clusterState, error) {
+	if c.opt.ClusterSlots != nil {
+		slots, err := c.opt.ClusterSlots()
+		if err != nil {
+			return nil, err
+		}
+		return newClusterState(c.nodes, slots, "")
+	}
+
+	addrs, err := c.nodes.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	var firstErr error
+	for _, addr := range addrs {
+		node, err := c.nodes.Get(addr)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		slots, err := node.Client.ClusterSlots().Result()
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+	}
+
+	return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+	ticker := time.NewTicker(idleCheckFrequency)
+	defer ticker.Stop()
+
+	for range ticker.C {
+		nodes, err := c.nodes.All()
+		if err != nil {
+			break
+		}
+
+		for _, node := range nodes {
+			_, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+			if err != nil {
+				internal.Logger.Printf("ReapStaleConns failed: %s", err)
+			}
+		}
+	}
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
+}
+
+func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+	cmdsMap := newCmdsMap()
+	err := c.mapCmdsByNode(cmdsMap, cmds)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				setCmdsErr(cmds, err)
+				return err
+			}
+		}
+
+		failedCmds := newCmdsMap()
+		var wg sync.WaitGroup
+
+		for node, cmds := range cmdsMap.m {
+			wg.Add(1)
+			go func(node *clusterNode, cmds []Cmder) {
+				defer wg.Done()
+
+				err := node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+					err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+						return writeCmd(wr, cmds...)
+					})
+					if err != nil {
+						return err
+					}
+
+					return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+						return c.pipelineReadCmds(node, rd, cmds, failedCmds)
+					})
+				})
+				if err == nil {
+					return
+				}
+				if attempt < c.opt.MaxRedirects {
+					if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+						setCmdsErr(cmds, err)
+					}
+				} else {
+					setCmdsErr(cmds, err)
+				}
+			}(node, cmds)
+		}
+
+		wg.Wait()
+		if len(failedCmds.m) == 0 {
+			break
+		}
+		cmdsMap = failedCmds
+	}
+
+	return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(cmdsMap *cmdsMap, cmds []Cmder) error {
+	state, err := c.state.Get()
+	if err != nil {
+		return err
+	}
+
+	if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+		for _, cmd := range cmds {
+			slot := c.cmdSlot(cmd)
+			node, err := c.slotReadOnlyNode(state, slot)
+			if err != nil {
+				return err
+			}
+			cmdsMap.Add(node, cmd)
+		}
+		return nil
+	}
+
+	for _, cmd := range cmds {
+		slot := c.cmdSlot(cmd)
+		node, err := state.slotMasterNode(slot)
+		if err != nil {
+			return err
+		}
+		cmdsMap.Add(node, cmd)
+	}
+	return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+	for _, cmd := range cmds {
+		cmdInfo := c.cmdInfo(cmd.Name())
+		if cmdInfo == nil || !cmdInfo.ReadOnly {
+			return false
+		}
+	}
+	return true
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+	node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	for _, cmd := range cmds {
+		err := cmd.readReply(rd)
+		if err == nil {
+			continue
+		}
+		if c.checkMovedErr(cmd, err, failedCmds) {
+			continue
+		}
+
+		if c.opt.ReadOnly && isLoadingError(err) {
+			node.MarkAsFailing()
+			return err
+		}
+		if isRedisError(err) {
+			continue
+		}
+		return err
+	}
+	return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+	cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+	moved, ask, addr := isMovedError(err)
+	if !moved && !ask {
+		return false
+	}
+
+	node, err := c.nodes.Get(addr)
+	if err != nil {
+		return false
+	}
+
+	if moved {
+		c.state.LazyReload()
+		failedCmds.Add(node, cmd)
+		return true
+	}
+
+	if ask {
+		failedCmds.Add(node, NewCmd("ASKING"), cmd)
+		return true
+	}
+
+	panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+}
+
+func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	state, err := c.state.Get()
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	cmdsMap := c.mapCmdsBySlot(cmds)
+	for slot, cmds := range cmdsMap {
+		node, err := state.slotMasterNode(slot)
+		if err != nil {
+			setCmdsErr(cmds, err)
+			continue
+		}
+
+		cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+		for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+			if attempt > 0 {
+				if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+					setCmdsErr(cmds, err)
+					return err
+				}
+			}
+
+			failedCmds := newCmdsMap()
+			var wg sync.WaitGroup
+
+			for node, cmds := range cmdsMap {
+				wg.Add(1)
+				go func(node *clusterNode, cmds []Cmder) {
+					defer wg.Done()
+
+					err := node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+						err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+							return txPipelineWriteMulti(wr, cmds)
+						})
+						if err != nil {
+							return err
+						}
+
+						return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+							err := c.txPipelineReadQueued(rd, cmds, failedCmds)
+							if err != nil {
+								moved, ask, addr := isMovedError(err)
+								if moved || ask {
+									return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
+								}
+								return err
+							}
+							return pipelineReadCmds(rd, cmds)
+						})
+					})
+					if err == nil {
+						return
+					}
+					if attempt < c.opt.MaxRedirects {
+						if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+							setCmdsErr(cmds, err)
+						}
+					} else {
+						setCmdsErr(cmds, err)
+					}
+				}(node, cmds)
+			}
+
+			wg.Wait()
+			if len(failedCmds.m) == 0 {
+				break
+			}
+			cmdsMap = failedCmds.m
+		}
+	}
+
+	return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+	cmdsMap := make(map[int][]Cmder)
+	for _, cmd := range cmds {
+		slot := c.cmdSlot(cmd)
+		cmdsMap[slot] = append(cmdsMap[slot], cmd)
+	}
+	return cmdsMap
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+	rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	// Parse queued replies.
+	var statusCmd StatusCmd
+	if err := statusCmd.readReply(rd); err != nil {
+		return err
+	}
+
+	for _, cmd := range cmds {
+		err := statusCmd.readReply(rd)
+		if err == nil || c.checkMovedErr(cmd, err, failedCmds) || isRedisError(err) {
+			continue
+		}
+		return err
+	}
+
+	// Parse number of replies.
+	line, err := rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		return fmt.Errorf("redis: expected '*', but got line %q", line)
+	}
+
+	return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+	cmds []Cmder, moved, ask bool, addr string, failedCmds *cmdsMap,
+) error {
+	node, err := c.nodes.Get(addr)
+	if err != nil {
+		return err
+	}
+
+	if moved {
+		c.state.LazyReload()
+		for _, cmd := range cmds {
+			failedCmds.Add(node, cmd)
+		}
+		return nil
+	}
+
+	if ask {
+		for _, cmd := range cmds {
+			failedCmds.Add(node, NewCmd("ASKING"), cmd)
+		}
+		return nil
+	}
+
+	return nil
+}
+
+func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
+	return c.WatchContext(c.ctx, fn, keys...)
+}
+
+func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	if len(keys) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one key")
+	}
+
+	slot := hashtag.Slot(keys[0])
+	for _, key := range keys[1:] {
+		if hashtag.Slot(key) != slot {
+			err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+			return err
+		}
+	}
+
+	node, err := c.slotMasterNode(slot)
+	if err != nil {
+		return err
+	}
+
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		err = node.Client.WatchContext(ctx, fn, keys...)
+		if err == nil {
+			break
+		}
+		if err != Nil {
+			c.state.LazyReload()
+		}
+
+		moved, ask, addr := isMovedError(err)
+		if moved || ask {
+			node, err = c.nodes.Get(addr)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if err == pool.ErrClosed || isReadOnlyError(err) {
+			node, err = c.slotMasterNode(slot)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if isRetryableError(err, true) {
+			continue
+		}
+
+		return err
+	}
+
+	return err
+}
+
+func (c *ClusterClient) pubSub() *PubSub {
+	var node *clusterNode
+	pubsub := &PubSub{
+		opt: c.opt.clientOptions(),
+
+		newConn: func(channels []string) (*pool.Conn, error) {
+			if node != nil {
+				panic("node != nil")
+			}
+
+			var err error
+			if len(channels) > 0 {
+				slot := hashtag.Slot(channels[0])
+				node, err = c.slotMasterNode(slot)
+			} else {
+				node, err = c.nodes.Random()
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			cn, err := node.Client.newConn(context.TODO())
+			if err != nil {
+				node = nil
+
+				return nil, err
+			}
+
+			return cn, nil
+		},
+		closeConn: func(cn *pool.Conn) error {
+			err := node.Client.connPool.CloseConn(cn)
+			node = nil
+			return err
+		},
+	}
+	pubsub.init()
+
+	return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(channels...)
+	}
+	return pubsub
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) {
+	addrs, err := c.nodes.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	var firstErr error
+	for _, addr := range addrs {
+		node, err := c.nodes.Get(addr)
+		if err != nil {
+			return nil, err
+		}
+		if node == nil {
+			continue
+		}
+
+		info, err := node.Client.Command().Result()
+		if err == nil {
+			return info, nil
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+	return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get()
+	if err != nil {
+		return nil
+	}
+
+	info := cmdsInfo[name]
+	if info == nil {
+		internal.Logger.Printf("info for cmd=%s not found", name)
+	}
+	return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+	args := cmd.Args()
+	if args[0] == "cluster" && args[1] == "getkeysinslot" {
+		return args[2].(int)
+	}
+
+	cmdInfo := c.cmdInfo(cmd.Name())
+	return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+	if pos == 0 {
+		return hashtag.RandomSlot()
+	}
+	firstKey := cmd.stringArg(pos)
+	return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(cmdInfo *CommandInfo, slot int) (*clusterNode, error) {
+	state, err := c.state.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
+		return c.slotReadOnlyNode(state, slot)
+	}
+	return state.slotMasterNode(slot)
+}
+
+func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+	if c.opt.RouteByLatency {
+		return state.slotClosestNode(slot)
+	}
+	if c.opt.RouteRandomly {
+		return state.slotRandomNode(slot)
+	}
+	return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) {
+	state, err := c.state.Get()
+	if err != nil {
+		return nil, err
+	}
+	return state.slotMasterNode(slot)
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+	for _, n := range nodes {
+		if n == node {
+			return nodes
+		}
+	}
+	return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+	for _, e := range es {
+		for _, s := range ss {
+			if s == e {
+				continue loop
+			}
+		}
+		ss = append(ss, e)
+	}
+	return ss
+}
+
+func remove(ss []string, es ...string) []string {
+	if len(es) == 0 {
+		return ss[:0]
+	}
+	for _, e := range es {
+		for i, s := range ss {
+			if s == e {
+				ss = append(ss[:i], ss[i+1:]...)
+				break
+			}
+		}
+	}
+	return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+	mu sync.Mutex
+	m  map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+	return &cmdsMap{
+		m: make(map[*clusterNode][]Cmder),
+	}
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+	m.mu.Lock()
+	m.m[node] = append(m.m[node], cmds...)
+	m.mu.Unlock()
+}

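cluster.go above defines `ClusterOptions.ClusterSlots` for building a cluster out of standalone servers. A minimal sketch of that option, separate from the commit itself; the addresses and slot split are hypothetical:

```go
func newManualCluster() *redis.ClusterClient {
	return redis.NewClusterClient(&redis.ClusterOptions{
		ClusterSlots: func() ([]redis.ClusterSlot, error) {
			return []redis.ClusterSlot{
				// First half of the keyspace.
				{Start: 0, End: 8191, Nodes: []redis.ClusterNode{
					{Addr: ":7000"}, // master
					{Addr: ":8000"}, // slave
				}},
				// Second half of the keyspace.
				{Start: 8192, End: 16383, Nodes: []redis.ClusterNode{
					{Addr: ":7001"}, // master
					{Addr: ":8001"}, // slave
				}},
			}, nil
		},
		RouteRandomly: true,
	})
}
```

ReloadState (or the automatic lazy reload) re-runs ClusterSlots when the topology changes.
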
+ 22 - 0
cluster_commands.go

@@ -0,0 +1,22 @@
+package redis
+
+import "sync/atomic"
+
+func (c *ClusterClient) DBSize() *IntCmd {
+	cmd := NewIntCmd("dbsize")
+	var size int64
+	err := c.ForEachMaster(func(master *Client) error {
+		n, err := master.DBSize().Result()
+		if err != nil {
+			return err
+		}
+		atomic.AddInt64(&size, n)
+		return nil
+	})
+	if err != nil {
+		cmd.SetErr(err)
+		return cmd
+	}
+	cmd.val = size
+	return cmd
+}

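DBSize above fans out over ForEachMaster and aggregates the per-master counts atomically; the same pattern works from application code. A hypothetical helper, e.g. for clearing a test cluster:

```go
// flushAllMasters clears every master node; ForEachMaster runs the callback
// concurrently and returns the first error, if any.
func flushAllMasters(rdb *redis.ClusterClient) error {
	return rdb.ForEachMaster(func(master *redis.Client) error {
		return master.FlushDB().Err()
	})
}
```
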
+ 1051 - 0
cluster_test.go

@@ -0,0 +1,1051 @@
+package redis_test
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v7/internal/hashtag"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+type clusterScenario struct {
+	ports     []string
+	nodeIDs   []string
+	processes map[string]*redisProcess
+	clients   map[string]*redis.Client
+}
+
+func (s *clusterScenario) masters() []*redis.Client {
+	result := make([]*redis.Client, 3)
+	for pos, port := range s.ports[:3] {
+		result[pos] = s.clients[port]
+	}
+	return result
+}
+
+func (s *clusterScenario) slaves() []*redis.Client {
+	result := make([]*redis.Client, 3)
+	for pos, port := range s.ports[3:] {
+		result[pos] = s.clients[port]
+	}
+	return result
+}
+
+func (s *clusterScenario) addrs() []string {
+	addrs := make([]string, len(s.ports))
+	for i, port := range s.ports {
+		addrs[i] = net.JoinHostPort("127.0.0.1", port)
+	}
+	return addrs
+}
+
+func (s *clusterScenario) clusterClientUnsafe(opt *redis.ClusterOptions) *redis.ClusterClient {
+	opt.Addrs = s.addrs()
+	return redis.NewClusterClient(opt)
+
+}
+
+func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.ClusterClient {
+	client := s.clusterClientUnsafe(opt)
+
+	err := eventually(func() error {
+		if opt.ClusterSlots != nil {
+			return nil
+		}
+
+		state, err := client.LoadState()
+		if err != nil {
+			return err
+		}
+
+		if !state.IsConsistent() {
+			return fmt.Errorf("cluster state is not consistent")
+		}
+
+		return nil
+	}, 30*time.Second)
+	if err != nil {
+		panic(err)
+	}
+
+	return client
+}
+
+func startCluster(scenario *clusterScenario) error {
+	// Start processes and collect node ids
+	for pos, port := range scenario.ports {
+		process, err := startRedis(port, "--cluster-enabled", "yes")
+		if err != nil {
+			return err
+		}
+
+		client := redis.NewClient(&redis.Options{
+			Addr: ":" + port,
+		})
+
+		info, err := client.ClusterNodes().Result()
+		if err != nil {
+			return err
+		}
+
+		scenario.processes[port] = process
+		scenario.clients[port] = client
+		scenario.nodeIDs[pos] = info[:40]
+	}
+
+	// Meet cluster nodes.
+	for _, client := range scenario.clients {
+		err := client.ClusterMeet("127.0.0.1", scenario.ports[0]).Err()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Bootstrap masters.
+	slots := []int{0, 5000, 10000, 16384}
+	for pos, master := range scenario.masters() {
+		err := master.ClusterAddSlotsRange(slots[pos], slots[pos+1]-1).Err()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Bootstrap slaves.
+	for idx, slave := range scenario.slaves() {
+		masterID := scenario.nodeIDs[idx]
+
+		// Wait until master is available
+		err := eventually(func() error {
+			s := slave.ClusterNodes().Val()
+			wanted := masterID
+			if !strings.Contains(s, wanted) {
+				return fmt.Errorf("%q does not contain %q", s, wanted)
+			}
+			return nil
+		}, 10*time.Second)
+		if err != nil {
+			return err
+		}
+
+		err = slave.ClusterReplicate(masterID).Err()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Wait until all nodes have consistent info.
+	wanted := []redis.ClusterSlot{{
+		Start: 0,
+		End:   4999,
+		Nodes: []redis.ClusterNode{{
+			ID:   "",
+			Addr: "127.0.0.1:8220",
+		}, {
+			ID:   "",
+			Addr: "127.0.0.1:8223",
+		}},
+	}, {
+		Start: 5000,
+		End:   9999,
+		Nodes: []redis.ClusterNode{{
+			ID:   "",
+			Addr: "127.0.0.1:8221",
+		}, {
+			ID:   "",
+			Addr: "127.0.0.1:8224",
+		}},
+	}, {
+		Start: 10000,
+		End:   16383,
+		Nodes: []redis.ClusterNode{{
+			ID:   "",
+			Addr: "127.0.0.1:8222",
+		}, {
+			ID:   "",
+			Addr: "127.0.0.1:8225",
+		}},
+	}}
+	for _, client := range scenario.clients {
+		err := eventually(func() error {
+			res, err := client.ClusterSlots().Result()
+			if err != nil {
+				return err
+			}
+			return assertSlotsEqual(res, wanted)
+		}, 30*time.Second)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func assertSlotsEqual(slots, wanted []redis.ClusterSlot) error {
+outerLoop:
+	for _, s2 := range wanted {
+		for _, s1 := range slots {
+			if slotEqual(s1, s2) {
+				continue outerLoop
+			}
+		}
+		return fmt.Errorf("%v not found in %v", s2, slots)
+	}
+	return nil
+}
+
+func slotEqual(s1, s2 redis.ClusterSlot) bool {
+	if s1.Start != s2.Start {
+		return false
+	}
+	if s1.End != s2.End {
+		return false
+	}
+	if len(s1.Nodes) != len(s2.Nodes) {
+		return false
+	}
+	for i, n1 := range s1.Nodes {
+		if n1.Addr != s2.Nodes[i].Addr {
+			return false
+		}
+	}
+	return true
+}
+
+func stopCluster(scenario *clusterScenario) error {
+	for _, client := range scenario.clients {
+		if err := client.Close(); err != nil {
+			return err
+		}
+	}
+	for _, process := range scenario.processes {
+		if err := process.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+var _ = Describe("ClusterClient", func() {
+	var failover bool
+	var opt *redis.ClusterOptions
+	var client *redis.ClusterClient
+
+	assertClusterClient := func() {
+		It("supports WithContext", func() {
+			c, cancel := context.WithCancel(context.Background())
+			cancel()
+
+			err := client.WithContext(c).Ping().Err()
+			Expect(err).To(MatchError("context canceled"))
+		})
+
+		It("should GET/SET/DEL", func() {
+			err := client.Get("A").Err()
+			Expect(err).To(Equal(redis.Nil))
+
+			err = client.Set("A", "VALUE", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			Eventually(func() string {
+				return client.Get("A").Val()
+			}, 30*time.Second).Should(Equal("VALUE"))
+
+			cnt, err := client.Del("A").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cnt).To(Equal(int64(1)))
+		})
+
+		It("GET follows redirects", func() {
+			err := client.Set("A", "VALUE", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			if !failover {
+				Eventually(func() int64 {
+					nodes, err := client.Nodes("A")
+					if err != nil {
+						return 0
+					}
+					return nodes[1].Client.DBSize().Val()
+				}, 30*time.Second).Should(Equal(int64(1)))
+
+				Eventually(func() error {
+					return client.SwapNodes("A")
+				}, 30*time.Second).ShouldNot(HaveOccurred())
+			}
+
+			v, err := client.Get("A").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(v).To(Equal("VALUE"))
+		})
+
+		It("SET follows redirects", func() {
+			if !failover {
+				Eventually(func() error {
+					return client.SwapNodes("A")
+				}, 30*time.Second).ShouldNot(HaveOccurred())
+			}
+
+			err := client.Set("A", "VALUE", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			v, err := client.Get("A").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(v).To(Equal("VALUE"))
+		})
+
+		It("distributes keys", func() {
+			for i := 0; i < 100; i++ {
+				err := client.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+
+			client.ForEachMaster(func(master *redis.Client) error {
+				defer GinkgoRecover()
+				Eventually(func() string {
+					return master.Info("keyspace").Val()
+				}, 30*time.Second).Should(Or(
+					ContainSubstring("keys=31"),
+					ContainSubstring("keys=29"),
+					ContainSubstring("keys=40"),
+				))
+				return nil
+			})
+		})
+
+		It("distributes keys when using EVAL", func() {
+			script := redis.NewScript(`
+				local r = redis.call('SET', KEYS[1], ARGV[1])
+				return r
+			`)
+
+			var key string
+			for i := 0; i < 100; i++ {
+				key = fmt.Sprintf("key%d", i)
+				err := script.Run(client, []string{key}, "value").Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+
+			client.ForEachMaster(func(master *redis.Client) error {
+				defer GinkgoRecover()
+				Eventually(func() string {
+					return master.Info("keyspace").Val()
+				}, 30*time.Second).Should(Or(
+					ContainSubstring("keys=31"),
+					ContainSubstring("keys=29"),
+					ContainSubstring("keys=40"),
+				))
+				return nil
+			})
+		})
+
+		It("supports Watch", func() {
+			var incr func(string) error
+
+			// Transactionally increments key using GET and SET commands.
+			incr = func(key string) error {
+				err := client.Watch(func(tx *redis.Tx) error {
+					n, err := tx.Get(key).Int64()
+					if err != nil && err != redis.Nil {
+						return err
+					}
+
+					_, err = tx.TxPipelined(func(pipe redis.Pipeliner) error {
+						pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
+						return nil
+					})
+					return err
+				}, key)
+				if err == redis.TxFailedErr {
+					return incr(key)
+				}
+				return err
+			}
+
+			var wg sync.WaitGroup
+			for i := 0; i < 100; i++ {
+				wg.Add(1)
+				go func() {
+					defer GinkgoRecover()
+					defer wg.Done()
+
+					err := incr("key")
+					Expect(err).NotTo(HaveOccurred())
+				}()
+			}
+			wg.Wait()
+
+			Eventually(func() string {
+				return client.Get("key").Val()
+			}, 30*time.Second).Should(Equal("100"))
+		})
+
+		Describe("pipelining", func() {
+			var pipe *redis.Pipeline
+
+			assertPipeline := func() {
+				keys := []string{"A", "B", "C", "D", "E", "F", "G"}
+
+				It("follows redirects", func() {
+					if !failover {
+						for _, key := range keys {
+							Eventually(func() error {
+								return client.SwapNodes(key)
+							}, 30*time.Second).ShouldNot(HaveOccurred())
+						}
+					}
+
+					for i, key := range keys {
+						pipe.Set(key, key+"_value", 0)
+						pipe.Expire(key, time.Duration(i+1)*time.Hour)
+					}
+					cmds, err := pipe.Exec()
+					Expect(err).NotTo(HaveOccurred())
+					Expect(cmds).To(HaveLen(14))
+
+					_ = client.ForEachNode(func(node *redis.Client) error {
+						defer GinkgoRecover()
+						Eventually(func() int64 {
+							return node.DBSize().Val()
+						}, 30*time.Second).ShouldNot(BeZero())
+						return nil
+					})
+
+					if !failover {
+						for _, key := range keys {
+							Eventually(func() error {
+								return client.SwapNodes(key)
+							}, 30*time.Second).ShouldNot(HaveOccurred())
+						}
+					}
+
+					for _, key := range keys {
+						pipe.Get(key)
+						pipe.TTL(key)
+					}
+					cmds, err = pipe.Exec()
+					Expect(err).NotTo(HaveOccurred())
+					Expect(cmds).To(HaveLen(14))
+
+					for i, key := range keys {
+						get := cmds[i*2].(*redis.StringCmd)
+						Expect(get.Val()).To(Equal(key + "_value"))
+
+						ttl := cmds[(i*2)+1].(*redis.DurationCmd)
+						dur := time.Duration(i+1) * time.Hour
+						Expect(ttl.Val()).To(BeNumerically("~", dur, 30*time.Second))
+					}
+				})
+
+				It("works with missing keys", func() {
+					pipe.Set("A", "A_value", 0)
+					pipe.Set("C", "C_value", 0)
+					_, err := pipe.Exec()
+					Expect(err).NotTo(HaveOccurred())
+
+					a := pipe.Get("A")
+					b := pipe.Get("B")
+					c := pipe.Get("C")
+					cmds, err := pipe.Exec()
+					Expect(err).To(Equal(redis.Nil))
+					Expect(cmds).To(HaveLen(3))
+
+					Expect(a.Err()).NotTo(HaveOccurred())
+					Expect(a.Val()).To(Equal("A_value"))
+
+					Expect(b.Err()).To(Equal(redis.Nil))
+					Expect(b.Val()).To(Equal(""))
+
+					Expect(c.Err()).NotTo(HaveOccurred())
+					Expect(c.Val()).To(Equal("C_value"))
+				})
+			}
+
+			Describe("with Pipeline", func() {
+				BeforeEach(func() {
+					pipe = client.Pipeline().(*redis.Pipeline)
+				})
+
+				AfterEach(func() {
+					Expect(pipe.Close()).NotTo(HaveOccurred())
+				})
+
+				assertPipeline()
+			})
+
+			Describe("with TxPipeline", func() {
+				BeforeEach(func() {
+					pipe = client.TxPipeline().(*redis.Pipeline)
+				})
+
+				AfterEach(func() {
+					Expect(pipe.Close()).NotTo(HaveOccurred())
+				})
+
+				assertPipeline()
+			})
+		})
+
+		It("supports PubSub", func() {
+			pubsub := client.Subscribe("mychannel")
+			defer pubsub.Close()
+
+			Eventually(func() error {
+				_, err := client.Publish("mychannel", "hello").Result()
+				if err != nil {
+					return err
+				}
+
+				msg, err := pubsub.ReceiveTimeout(time.Second)
+				if err != nil {
+					return err
+				}
+
+				_, ok := msg.(*redis.Message)
+				if !ok {
+					return fmt.Errorf("got %T, wanted *redis.Message", msg)
+				}
+
+				return nil
+			}, 30*time.Second).ShouldNot(HaveOccurred())
+		})
+
+		It("supports PubSub.Ping without channels", func() {
+			pubsub := client.Subscribe()
+			defer pubsub.Close()
+
+			err := pubsub.Ping()
+			Expect(err).NotTo(HaveOccurred())
+		})
+	}
+
+	Describe("ClusterClient", func() {
+		BeforeEach(func() {
+			opt = redisClusterOptions()
+			client = cluster.clusterClient(opt)
+
+			err := client.ForEachMaster(func(master *redis.Client) error {
+				return master.FlushDB().Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			_ = client.ForEachMaster(func(master *redis.Client) error {
+				return master.FlushDB().Err()
+			})
+			Expect(client.Close()).NotTo(HaveOccurred())
+		})
+
+		It("returns pool stats", func() {
+			stats := client.PoolStats()
+			Expect(stats).To(BeAssignableToTypeOf(&redis.PoolStats{}))
+		})
+
+		It("returns an error when there are no attempts left", func() {
+			opt := redisClusterOptions()
+			opt.MaxRedirects = -1
+			client := cluster.clusterClient(opt)
+
+			Eventually(func() error {
+				return client.SwapNodes("A")
+			}, 30*time.Second).ShouldNot(HaveOccurred())
+
+			err := client.Get("A").Err()
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("MOVED"))
+
+			Expect(client.Close()).NotTo(HaveOccurred())
+		})
+
+		It("calls fn for every master node", func() {
+			for i := 0; i < 10; i++ {
+				Expect(client.Set(strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred())
+			}
+
+			err := client.ForEachMaster(func(master *redis.Client) error {
+				return master.FlushDB().Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			size, err := client.DBSize().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(0)))
+		})
+
+		It("should CLUSTER SLOTS", func() {
+			res, err := client.ClusterSlots().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(HaveLen(3))
+
+			wanted := []redis.ClusterSlot{{
+				Start: 0,
+				End:   4999,
+				Nodes: []redis.ClusterNode{{
+					ID:   "",
+					Addr: "127.0.0.1:8220",
+				}, {
+					ID:   "",
+					Addr: "127.0.0.1:8223",
+				}},
+			}, {
+				Start: 5000,
+				End:   9999,
+				Nodes: []redis.ClusterNode{{
+					ID:   "",
+					Addr: "127.0.0.1:8221",
+				}, {
+					ID:   "",
+					Addr: "127.0.0.1:8224",
+				}},
+			}, {
+				Start: 10000,
+				End:   16383,
+				Nodes: []redis.ClusterNode{{
+					ID:   "",
+					Addr: "127.0.0.1:8222",
+				}, {
+					ID:   "",
+					Addr: "127.0.0.1:8225",
+				}},
+			}}
+			Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred())
+		})
+
+		It("should CLUSTER NODES", func() {
+			res, err := client.ClusterNodes().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(len(res)).To(BeNumerically(">", 400))
+		})
+
+		It("should CLUSTER INFO", func() {
+			res, err := client.ClusterInfo().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(ContainSubstring("cluster_known_nodes:6"))
+		})
+
+		It("should CLUSTER KEYSLOT", func() {
+			hashSlot, err := client.ClusterKeySlot("somekey").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey"))))
+		})
+
+		It("should CLUSTER GETKEYSINSLOT", func() {
+			keys, err := client.ClusterGetKeysInSlot(hashtag.Slot("somekey"), 1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(len(keys)).To(Equal(0))
+		})
+
+		It("should CLUSTER COUNT-FAILURE-REPORTS", func() {
+			n, err := client.ClusterCountFailureReports(cluster.nodeIDs[0]).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(0)))
+		})
+
+		It("should CLUSTER COUNTKEYSINSLOT", func() {
+			n, err := client.ClusterCountKeysInSlot(10).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(0)))
+		})
+
+		It("should CLUSTER SAVECONFIG", func() {
+			res, err := client.ClusterSaveConfig().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(Equal("OK"))
+		})
+
+		It("should CLUSTER SLAVES", func() {
+			nodesList, err := client.ClusterSlaves(cluster.nodeIDs[0]).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(nodesList).Should(ContainElement(ContainSubstring("slave")))
+			Expect(nodesList).Should(HaveLen(1))
+		})
+
+		It("should RANDOMKEY", func() {
+			const nkeys = 100
+
+			for i := 0; i < nkeys; i++ {
+				err := client.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+
+			var keys []string
+			addKey := func(key string) {
+				for _, k := range keys {
+					if k == key {
+						return
+					}
+				}
+				keys = append(keys, key)
+			}
+
+			for i := 0; i < nkeys*10; i++ {
+				key := client.RandomKey().Val()
+				addKey(key)
+			}
+
+			Expect(len(keys)).To(BeNumerically("~", nkeys, nkeys/10))
+		})
+
+		assertClusterClient()
+	})
+
+	Describe("ClusterClient failover", func() {
+		BeforeEach(func() {
+			failover = true
+
+			opt = redisClusterOptions()
+			opt.MinRetryBackoff = 250 * time.Millisecond
+			opt.MaxRetryBackoff = time.Second
+			client = cluster.clusterClient(opt)
+
+			err := client.ForEachMaster(func(master *redis.Client) error {
+				return master.FlushDB().Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ForEachSlave(func(slave *redis.Client) error {
+				defer GinkgoRecover()
+
+				Eventually(func() int64 {
+					return slave.DBSize().Val()
+				}, "30s").Should(Equal(int64(0)))
+
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			state, err := client.LoadState()
+			Eventually(func() bool {
+				state, err = client.LoadState()
+				if err != nil {
+					return false
+				}
+				return state.IsConsistent()
+			}, "30s").Should(BeTrue())
+
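+			// Promote each slave with CLUSTER FAILOVER and wait for the cluster
+			// state to become consistent again before the tests run.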
+			for _, slave := range state.Slaves {
+				err = slave.Client.ClusterFailover().Err()
+				Expect(err).NotTo(HaveOccurred())
+
+				Eventually(func() bool {
+					state, _ := client.LoadState()
+					return state.IsConsistent()
+				}, "30s").Should(BeTrue())
+			}
+		})
+
+		AfterEach(func() {
+			failover = false
+			Expect(client.Close()).NotTo(HaveOccurred())
+		})
+
+		assertClusterClient()
+	})
+
+	Describe("ClusterClient with RouteByLatency", func() {
+		BeforeEach(func() {
+			opt = redisClusterOptions()
+			opt.RouteByLatency = true
+			client = cluster.clusterClient(opt)
+
+			err := client.ForEachMaster(func(master *redis.Client) error {
+				return master.FlushDB().Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ForEachSlave(func(slave *redis.Client) error {
+				Eventually(func() int64 {
+					return client.DBSize().Val()
+				}, 30*time.Second).Should(Equal(int64(0)))
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			err := client.ForEachSlave(func(slave *redis.Client) error {
+				return slave.ReadWrite().Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.Close()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		assertClusterClient()
+	})
+
+	Describe("ClusterClient with ClusterSlots", func() {
+		BeforeEach(func() {
+			failover = true
+
+			opt = redisClusterOptions()
+			opt.ClusterSlots = func() ([]redis.ClusterSlot, error) {
+				slots := []redis.ClusterSlot{{
+					Start: 0,
+					End:   4999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":" + ringShard1Port,
+					}},
+				}, {
+					Start: 5000,
+					End:   9999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":" + ringShard2Port,
+					}},
+				}, {
+					Start: 10000,
+					End:   16383,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":" + ringShard3Port,
+					}},
+				}}
+				return slots, nil
+			}
+			client = cluster.clusterClient(opt)
+
+			err := client.ForEachMaster(func(master *redis.Client) error {
+				return master.FlushDB().Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ForEachSlave(func(slave *redis.Client) error {
+				Eventually(func() int64 {
+					return client.DBSize().Val()
+				}, 30*time.Second).Should(Equal(int64(0)))
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			failover = false
+
+			err := client.Close()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		assertClusterClient()
+	})
+
+	Describe("ClusterClient with RouteRandomly and ClusterSlots", func() {
+		BeforeEach(func() {
+			failover = true
+
+			opt = redisClusterOptions()
+			opt.RouteRandomly = true
+			opt.ClusterSlots = func() ([]redis.ClusterSlot, error) {
+				slots := []redis.ClusterSlot{{
+					Start: 0,
+					End:   4999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":" + ringShard1Port,
+					}},
+				}, {
+					Start: 5000,
+					End:   9999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":" + ringShard2Port,
+					}},
+				}, {
+					Start: 10000,
+					End:   16383,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":" + ringShard3Port,
+					}},
+				}}
+				return slots, nil
+			}
+			client = cluster.clusterClient(opt)
+
+			err := client.ForEachMaster(func(master *redis.Client) error {
+				return master.FlushDB().Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ForEachSlave(func(slave *redis.Client) error {
+				Eventually(func() int64 {
+					return client.DBSize().Val()
+				}, 30*time.Second).Should(Equal(int64(0)))
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			failover = false
+
+			err := client.Close()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		assertClusterClient()
+	})
+})
+
+var _ = Describe("ClusterClient without nodes", func() {
+	var client *redis.ClusterClient
+
+	BeforeEach(func() {
+		client = redis.NewClusterClient(&redis.ClusterOptions{})
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("Ping returns an error", func() {
+		err := client.Ping().Err()
+		Expect(err).To(MatchError("redis: cluster has no nodes"))
+	})
+
+	It("pipeline returns an error", func() {
+		_, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+			pipe.Ping()
+			return nil
+		})
+		Expect(err).To(MatchError("redis: cluster has no nodes"))
+	})
+})
+
+var _ = Describe("ClusterClient without valid nodes", func() {
+	var client *redis.ClusterClient
+
+	BeforeEach(func() {
+		client = redis.NewClusterClient(&redis.ClusterOptions{
+			Addrs: []string{redisAddr},
+		})
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("returns an error", func() {
+		err := client.Ping().Err()
+		Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+	})
+
+	It("pipeline returns an error", func() {
+		_, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+			pipe.Ping()
+			return nil
+		})
+		Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+	})
+})
+
+var _ = Describe("ClusterClient with unavailable Cluster", func() {
+	var client *redis.ClusterClient
+
+	BeforeEach(func() {
+		for _, node := range cluster.clients {
+			err := node.ClientPause(5 * time.Second).Err()
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		opt := redisClusterOptions()
+		opt.ReadTimeout = 250 * time.Millisecond
+		opt.WriteTimeout = 250 * time.Millisecond
+		opt.MaxRedirects = 1
+		client = cluster.clusterClientUnsafe(opt)
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("recovers when Cluster recovers", func() {
+		err := client.Ping().Err()
+		Expect(err).To(HaveOccurred())
+
+		Eventually(func() error {
+			return client.Ping().Err()
+		}, "30s").ShouldNot(HaveOccurred())
+	})
+})
+
+var _ = Describe("ClusterClient timeout", func() {
+	var client *redis.ClusterClient
+
+	AfterEach(func() {
+		_ = client.Close()
+	})
+
+	testTimeout := func() {
+		It("Ping timeouts", func() {
+			err := client.Ping().Err()
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Pipeline timeouts", func() {
+			_, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+				pipe.Ping()
+				return nil
+			})
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Tx timeouts", func() {
+			err := client.Watch(func(tx *redis.Tx) error {
+				return tx.Ping().Err()
+			}, "foo")
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Tx Pipeline timeouts", func() {
+			err := client.Watch(func(tx *redis.Tx) error {
+				_, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+					pipe.Ping()
+					return nil
+				})
+				return err
+			}, "foo")
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+	}
+
+	const pause = 5 * time.Second
+
+	Context("read/write timeout", func() {
+		BeforeEach(func() {
+			opt := redisClusterOptions()
+			opt.ReadTimeout = 250 * time.Millisecond
+			opt.WriteTimeout = 250 * time.Millisecond
+			opt.MaxRedirects = 1
+			client = cluster.clusterClient(opt)
+
+			err := client.ForEachNode(func(client *redis.Client) error {
+				return client.ClientPause(pause).Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			_ = client.ForEachNode(func(client *redis.Client) error {
+				defer GinkgoRecover()
+				Eventually(func() error {
+					return client.Ping().Err()
+				}, 2*pause).ShouldNot(HaveOccurred())
+				return nil
+			})
+		})
+
+		testTimeout()
+	})
+})

+ 2027 - 0
command.go

@@ -0,0 +1,2027 @@
+package redis
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-redis/redis/internal"
+	"github.com/go-redis/redis/internal/proto"
+	"github.com/go-redis/redis/internal/util"
+)
+
+type Cmder interface {
+	Name() string
+	Args() []interface{}
+	stringArg(int) string
+
+	readTimeout() *time.Duration
+	readReply(rd *proto.Reader) error
+
+	SetErr(error)
+	Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+	for _, cmd := range cmds {
+		if cmd.Err() == nil {
+			cmd.SetErr(e)
+		}
+	}
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+	for _, cmd := range cmds {
+		if err := cmd.Err(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeCmd(wr *proto.Writer, cmds ...Cmder) error {
+	for _, cmd := range cmds {
+		err := wr.WriteArgs(cmd.Args())
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+	ss := make([]string, 0, len(cmd.Args()))
+	for _, arg := range cmd.Args() {
+		ss = append(ss, fmt.Sprint(arg))
+	}
+	s := strings.Join(ss, " ")
+	if err := cmd.Err(); err != nil {
+		return s + ": " + err.Error()
+	}
+	if val != nil {
+		switch vv := val.(type) {
+		case []byte:
+			return s + ": " + string(vv)
+		default:
+			return s + ": " + fmt.Sprint(val)
+		}
+	}
+	return s
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+	switch cmd.Name() {
+	case "eval", "evalsha":
+		if cmd.stringArg(2) != "0" {
+			return 3
+		}
+
+		return 0
+	case "publish":
+		return 1
+	}
+	if info == nil {
+		return 0
+	}
+	return int(info.FirstKeyPos)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+	args []interface{}
+	err  error
+
+	_readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+	if len(cmd.args) == 0 {
+		return ""
+	}
+	// Cmd name must be lower cased.
+	return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+	return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+	if pos < 0 || pos >= len(cmd.args) {
+		return ""
+	}
+	s, _ := cmd.args[pos].(string)
+	return s
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+	cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+	return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+	return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+	cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+	baseCmd
+
+	val interface{}
+}
+
+func NewCmd(args ...interface{}) *Cmd {
+	return &Cmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *Cmd) Val() interface{} {
+	return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) String() (string, error) {
+	if cmd.err != nil {
+		return "", cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case string:
+		return val, nil
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for String", val)
+		return "", err
+	}
+}
+
+func (cmd *Cmd) Int() (int, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return int(val), nil
+	case string:
+		return strconv.Atoi(val)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return val, nil
+	case string:
+		return strconv.ParseInt(val, 10, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return uint64(val), nil
+	case string:
+		return strconv.ParseUint(val, 10, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return float32(val), nil
+	case string:
+		f, err := strconv.ParseFloat(val, 32)
+		if err != nil {
+			return 0, err
+		}
+		return float32(f), nil
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return float64(val), nil
+	case string:
+		return strconv.ParseFloat(val, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+	if cmd.err != nil {
+		return false, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return val != 0, nil
+	case string:
+		return strconv.ParseBool(val)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+		return false, err
+	}
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) error {
+	cmd.val, cmd.err = rd.ReadReply(sliceParser)
+	return cmd.err
+}
+
+// Implements proto.MultiBulkParse
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	vals := make([]interface{}, n)
+	for i := 0; i < len(vals); i++ {
+		v, err := rd.ReadReply(sliceParser)
+		if err != nil {
+			if err == Nil {
+				vals[i] = nil
+				continue
+			}
+			if err, ok := err.(proto.RedisError); ok {
+				vals[i] = err
+				continue
+			}
+			return nil, err
+		}
+		vals[i] = v
+	}
+	return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+	baseCmd
+
+	val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(args ...interface{}) *SliceCmd {
+	return &SliceCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+	return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+	var v interface{}
+	v, cmd.err = rd.ReadArrayReply(sliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]interface{})
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+	baseCmd
+
+	val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(args ...interface{}) *StatusCmd {
+	return &StatusCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *StatusCmd) Val() string {
+	return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) error {
+	cmd.val, cmd.err = rd.ReadString()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+	baseCmd
+
+	val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(args ...interface{}) *IntCmd {
+	return &IntCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *IntCmd) Val() int64 {
+	return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+	return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) error {
+	cmd.val, cmd.err = rd.ReadIntReply()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+	baseCmd
+
+	val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(args ...interface{}) *IntSliceCmd {
+	return &IntSliceCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+	return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]int64, n)
+		for i := 0; i < len(cmd.val); i++ {
+			num, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[i] = num
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+	baseCmd
+
+	val       time.Duration
+	precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
+	return &DurationCmd{
+		baseCmd:   baseCmd{args: args},
+		precision: precision,
+	}
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+	return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+	var n int64
+	n, cmd.err = rd.ReadIntReply()
+	if cmd.err != nil {
+		return cmd.err
+	}
+	switch n {
+	// -2 if the key does not exist
+	// -1 if the key exists but has no associated expire
+	case -2, -1:
+		cmd.val = time.Duration(n)
+	default:
+		cmd.val = time.Duration(n) * cmd.precision
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+	baseCmd
+
+	val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(args ...interface{}) *TimeCmd {
+	return &TimeCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+	return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d elements, expected 2", n)
+		}
+
+		sec, err := rd.ReadInt()
+		if err != nil {
+			return nil, err
+		}
+
+		microsec, err := rd.ReadInt()
+		if err != nil {
+			return nil, err
+		}
+
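+		// TIME replies with seconds and microseconds; time.Unix expects
+		// nanoseconds, hence the *1000 conversion.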
+		cmd.val = time.Unix(sec, microsec*1000)
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+	baseCmd
+
+	val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(args ...interface{}) *BoolCmd {
+	return &BoolCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *BoolCmd) Val() bool {
+	return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+	var v interface{}
+	v, cmd.err = rd.ReadReply(nil)
+	// `SET key value NX` returns nil when key already exists. But
+	// `SETNX key value` returns bool (0/1). So convert nil to bool.
+	if cmd.err == Nil {
+		cmd.val = false
+		cmd.err = nil
+		return nil
+	}
+	if cmd.err != nil {
+		return cmd.err
+	}
+	switch v := v.(type) {
+	case int64:
+		cmd.val = v == 1
+		return nil
+	case string:
+		cmd.val = v == "OK"
+		return nil
+	default:
+		cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
+		return cmd.err
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+	baseCmd
+
+	val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(args ...interface{}) *StringCmd {
+	return &StringCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *StringCmd) Val() string {
+	return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+	return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+	return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	f, err := strconv.ParseFloat(cmd.Val(), 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+	if cmd.err != nil {
+		return time.Time{}, cmd.err
+	}
+	return time.Parse(time.RFC3339, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+	return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) error {
+	cmd.val, cmd.err = rd.ReadString()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+	baseCmd
+
+	val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(args ...interface{}) *FloatCmd {
+	return &FloatCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *FloatCmd) Val() float64 {
+	return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) error {
+	cmd.val, cmd.err = rd.ReadFloatReply()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+	baseCmd
+
+	val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+	return &StringSliceCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+	return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+	return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]string, n)
+		for i := 0; i < len(cmd.val); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.val[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.val[i] = s
+			}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+	baseCmd
+
+	val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+	return &BoolSliceCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+	return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]bool, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[i] = n == 1
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+	baseCmd
+
+	val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
+	return &StringStringMapCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+	return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]string, n/2)
+		for i := int64(0); i < n; i += 2 {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			value, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[key] = value
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+	baseCmd
+
+	val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
+	return &StringIntMapCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+	return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]int64, n/2)
+		for i := int64(0); i < n; i += 2 {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			n, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[key] = n
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+	baseCmd
+
+	val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd {
+	return &StringStructMapCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+	return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]struct{}, n)
+		for i := int64(0); i < n; i++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[key] = struct{}{}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+	ID     string
+	Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+	baseCmd
+
+	val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd {
+	return &XMessageSliceCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+	return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+	var v interface{}
+	v, cmd.err = rd.ReadArrayReply(xMessageSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]XMessage)
+	return nil
+}
+
+// Implements proto.MultiBulkParse
+func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	msgs := make([]XMessage, n)
+	for i := 0; i < len(msgs); i++ {
+		i := i
+		_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+			id, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			var values map[string]interface{}
+
+			v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+			if err != nil {
+				if err != proto.Nil {
+					return nil, err
+				}
+			} else {
+				values = v.(map[string]interface{})
+			}
+
+			msgs[i] = XMessage{
+				ID:     id,
+				Values: values,
+			}
+			return nil, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+	return msgs, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+	m := make(map[string]interface{}, n/2)
+	for i := int64(0); i < n; i += 2 {
+		key, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		value, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		m[key] = value
+	}
+	return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+	Stream   string
+	Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+	baseCmd
+
+	val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd {
+	return &XStreamSliceCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+	return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]XStream, n)
+		for i := 0; i < len(cmd.val); i++ {
+			i := i
+			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				if n != 2 {
+					return nil, fmt.Errorf("got %d, wanted 2", n)
+				}
+
+				stream, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				v, err := rd.ReadArrayReply(xMessageSliceParser)
+				if err != nil {
+					return nil, err
+				}
+
+				cmd.val[i] = XStream{
+					Stream:   stream,
+					Messages: v.([]XMessage),
+				}
+				return nil, nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
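+// XPending summarizes an XPENDING reply: the number of pending messages, the
+// smallest (Lower) and greatest (Higher) pending message IDs, and the number
+// of pending messages per consumer.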
+type XPending struct {
+	Count     int64
+	Lower     string
+	Higher    string
+	Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+	baseCmd
+	val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(args ...interface{}) *XPendingCmd {
+	return &XPendingCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+	return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 4 {
+			return nil, fmt.Errorf("got %d, wanted 4", n)
+		}
+
+		count, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		lower, err := rd.ReadString()
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		higher, err := rd.ReadString()
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		cmd.val = &XPending{
+			Count:  count,
+			Lower:  lower,
+			Higher: higher,
+		}
+		_, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+			for i := int64(0); i < n; i++ {
+				_, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+					if n != 2 {
+						return nil, fmt.Errorf("got %d, wanted 2", n)
+					}
+
+					consumerName, err := rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					consumerPending, err := rd.ReadInt()
+					if err != nil {
+						return nil, err
+					}
+
+					if cmd.val.Consumers == nil {
+						cmd.val.Consumers = make(map[string]int64)
+					}
+					cmd.val.Consumers[consumerName] = consumerPending
+
+					return nil, nil
+				})
+				if err != nil {
+					return nil, err
+				}
+			}
+			return nil, nil
+		})
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+	ID         string
+	Consumer   string
+	Idle       time.Duration
+	RetryCount int64
+}
+
+type XPendingExtCmd struct {
+	baseCmd
+	val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd {
+	return &XPendingExtCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+	return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]XPendingExt, 0, n)
+		for i := int64(0); i < n; i++ {
+			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				if n != 4 {
+					return nil, fmt.Errorf("got %d, wanted 4", n)
+				}
+
+				id, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				consumer, err := rd.ReadString()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				idle, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				retryCount, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				cmd.val = append(cmd.val, XPendingExt{
+					ID:         id,
+					Consumer:   consumer,
+					Idle:       time.Duration(idle) * time.Millisecond,
+					RetryCount: retryCount,
+				})
+				return nil, nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+	baseCmd
+	val []XInfoGroups
+}
+
+type XInfoGroups struct {
+	Name            string
+	Consumers       int64
+	Pending         int64
+	LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(stream string) *XInfoGroupsCmd {
+	return &XInfoGroupsCmd{
+		baseCmd: baseCmd{args: []interface{}{"xinfo", "groups", stream}},
+	}
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroups {
+	return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroups, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(xGroupInfoParser)
+			if err != nil {
+				return nil, err
+			}
+			cmd.val = append(cmd.val, v.(XInfoGroups))
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+func xGroupInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	if n != 8 {
+		return nil, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply,"+
+			"wanted 8", n)
+	}
+	var (
+		err error
+		grp XInfoGroups
+		key string
+		val string
+	)
+
+	for i := 0; i < 4; i++ {
+		key, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		val, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		switch key {
+		case "name":
+			grp.Name = val
+		case "consumers":
+			grp.Consumers, err = strconv.ParseInt(val, 0, 64)
+		case "pending":
+			grp.Pending, err = strconv.ParseInt(val, 0, 64)
+		case "last-delivered-id":
+			grp.LastDeliveredID = val
+		default:
+			return nil, fmt.Errorf("redis: unexpected content %s "+
+				"in XINFO GROUPS reply", key)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return grp, err
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+	baseCmd
+
+	val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
+	return &ZSliceCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+	return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]Z, n/2)
+		for i := 0; i < len(cmd.val); i++ {
+			member, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			score, err := rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[i] = Z{
+				Member: member,
+				Score:  score,
+			}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+	baseCmd
+
+	val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(args ...interface{}) *ZWithKeyCmd {
+	return &ZWithKeyCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+	return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 3 {
+			return nil, fmt.Errorf("got %d elements, expected 3", n)
+		}
+
+		cmd.val = &ZWithKey{}
+		var err error
+
+		cmd.val.Key, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val.Member, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val.Score, err = rd.ReadFloatReply()
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+	baseCmd
+
+	page   []string
+	cursor uint64
+
+	process func(cmd Cmder) error
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
+	return &ScanCmd{
+		baseCmd: baseCmd{args: args},
+		process: process,
+	}
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+	return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+	return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+	return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+	cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply()
+	return cmd.err
+}
+
+// Iterator creates a new ScanIterator.
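+//
+// Illustrative usage, assuming a configured *Client named rdb (the variable
+// name is hypothetical):
+//
+//	iter := rdb.Scan(0, "prefix:*", 0).Iterator()
+//	for iter.Next() {
+//		fmt.Println(iter.Val())
+//	}
+//	if err := iter.Err(); err != nil {
+//		panic(err)
+//	}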
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+	return &ScanIterator{
+		cmd: cmd,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+	ID   string
+	Addr string
+}
+
+type ClusterSlot struct {
+	Start int
+	End   int
+	Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+	baseCmd
+
+	val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
+	return &ClusterSlotsCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+	return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]ClusterSlot, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n < 2 {
+				err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+				return nil, err
+			}
+
+			start, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			end, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			nodes := make([]ClusterNode, n-2)
+			for j := 0; j < len(nodes); j++ {
+				n, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+				if n != 2 && n != 3 {
+					err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+					return nil, err
+				}
+
+				ip, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				port, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				nodes[j].Addr = net.JoinHostPort(ip, port)
+
+				if n == 3 {
+					id, err := rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+					nodes[j].ID = id
+				}
+			}
+
+			cmd.val[i] = ClusterSlot{
+				Start: int(start),
+				End:   int(end),
+				Nodes: nodes,
+			}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+	Name                      string
+	Longitude, Latitude, Dist float64
+	GeoHash                   int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query a geospatial index.
+type GeoRadiusQuery struct {
+	Radius float64
+	// Can be m, km, ft, or mi. Default is km.
+	Unit        string
+	WithCoord   bool
+	WithDist    bool
+	WithGeoHash bool
+	Count       int
+	// Can be ASC or DESC. Default is no sort order.
+	Sort      string
+	Store     string
+	StoreDist string
+}
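+
+// Illustrative query sketch (not part of the original commit; rdb is an assumed
+// *Client): request coordinates and distances, nearest first.
+//
+//	q := &GeoRadiusQuery{
+//		Radius:    200,
+//		Unit:      "km",
+//		WithCoord: true,
+//		WithDist:  true,
+//		Count:     10,
+//		Sort:      "ASC",
+//	}
+//	locs, err := rdb.GeoRadius("geo-key", 13.361389, 38.115556, q).Result()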
+
+type GeoLocationCmd struct {
+	baseCmd
+
+	q         *GeoRadiusQuery
+	locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+	return &GeoLocationCmd{
+		baseCmd: baseCmd{args: geoLocationArgs(q, args...)},
+		q:       q,
+	}
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+	args = append(args, q.Radius)
+	if q.Unit != "" {
+		args = append(args, q.Unit)
+	} else {
+		args = append(args, "km")
+	}
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithGeoHash {
+		args = append(args, "withhash")
+	}
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+	}
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+	if q.Store != "" {
+		args = append(args, "store")
+		args = append(args, q.Store)
+	}
+	if q.StoreDist != "" {
+		args = append(args, "storedist")
+		args = append(args, q.StoreDist)
+	}
+	return args
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+	return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+	return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+	var v interface{}
+	v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.locations = v.([]GeoLocation)
+	return nil
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		locs := make([]GeoLocation, 0, n)
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(newGeoLocationParser(q))
+			if err != nil {
+				return nil, err
+			}
+			switch vv := v.(type) {
+			case string:
+				locs = append(locs, GeoLocation{
+					Name: vv,
+				})
+			case *GeoLocation:
+				//TODO: avoid copying
+				locs = append(locs, *vv)
+			default:
+				return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+			}
+		}
+		return locs, nil
+	}
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		var loc GeoLocation
+		var err error
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		if q.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithGeoHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithCoord {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n != 2 {
+				return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return &loc, nil
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+	Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+	baseCmd
+
+	val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
+	return &GeoPosCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+	return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]*GeoPos, n)
+		for i := 0; i < len(cmd.val); i++ {
+			i := i
+			_, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				longitude, err := rd.ReadFloatReply()
+				if err != nil {
+					return nil, err
+				}
+
+				latitude, err := rd.ReadFloatReply()
+				if err != nil {
+					return nil, err
+				}
+
+				cmd.val[i] = &GeoPos{
+					Longitude: longitude,
+					Latitude:  latitude,
+				}
+				return nil, nil
+			})
+			if err != nil {
+				if err == Nil {
+					cmd.val[i] = nil
+					continue
+				}
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+	Name        string
+	Arity       int8
+	Flags       []string
+	FirstKeyPos int8
+	LastKeyPos  int8
+	StepCount   int8
+	ReadOnly    bool
+}
+
+type CommandsInfoCmd struct {
+	baseCmd
+
+	val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
+	return &CommandsInfoCmd{
+		baseCmd: baseCmd{args: args},
+	}
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+	return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]*CommandInfo, n)
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(commandInfoParser)
+			if err != nil {
+				return nil, err
+			}
+			vv := v.(*CommandInfo)
+			cmd.val[vv.Name] = vv
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	if n != 6 {
+		return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n)
+	}
+
+	var cmd CommandInfo
+	var err error
+
+	cmd.Name, err = rd.ReadString()
+	if err != nil {
+		return nil, err
+	}
+
+	arity, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.Arity = int8(arity)
+
+	_, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.Flags = make([]string, n)
+		for i := 0; i < len(cmd.Flags); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.Flags[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.Flags[i] = s
+			}
+		}
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	firstKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.FirstKeyPos = int8(firstKeyPos)
+
+	lastKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.LastKeyPos = int8(lastKeyPos)
+
+	stepCount, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.StepCount = int8(stepCount)
+
+	for _, flag := range cmd.Flags {
+		if flag == "readonly" {
+			cmd.ReadOnly = true
+			break
+		}
+	}
+
+	return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+	fn func() (map[string]*CommandInfo, error)
+
+	once internal.Once
+	cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache {
+	return &cmdsInfoCache{
+		fn: fn,
+	}
+}
+
+func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) {
+	err := c.once.Do(func() error {
+		cmds, err := c.fn()
+		if err != nil {
+			return err
+		}
+
+		// Extensions have cmd names in upper case. Convert them to lower case.
+		for k, v := range cmds {
+			lower := internal.ToLower(k)
+			if lower != k {
+				cmds[lower] = v
+			}
+		}
+
+		c.cmds = cmds
+		return nil
+	})
+	return c.cmds, err
+}

+ 96 - 0
command_test.go

@@ -0,0 +1,96 @@
+package redis_test
+
+import (
+	"errors"
+	"time"
+
+	redis "github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Cmd", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		client = redis.NewClient(redisOptions())
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("implements Stringer", func() {
+		set := client.Set("foo", "bar", 0)
+		Expect(set.String()).To(Equal("set foo bar: OK"))
+
+		get := client.Get("foo")
+		Expect(get.String()).To(Equal("get foo: bar"))
+	})
+
+	It("has val/err", func() {
+		set := client.Set("key", "hello", 0)
+		Expect(set.Err()).NotTo(HaveOccurred())
+		Expect(set.Val()).To(Equal("OK"))
+
+		get := client.Get("key")
+		Expect(get.Err()).NotTo(HaveOccurred())
+		Expect(get.Val()).To(Equal("hello"))
+
+		Expect(set.Err()).NotTo(HaveOccurred())
+		Expect(set.Val()).To(Equal("OK"))
+	})
+
+	It("has helpers", func() {
+		set := client.Set("key", "10", 0)
+		Expect(set.Err()).NotTo(HaveOccurred())
+
+		n, err := client.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(10)))
+
+		un, err := client.Get("key").Uint64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(un).To(Equal(uint64(10)))
+
+		f, err := client.Get("key").Float64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(f).To(Equal(float64(10)))
+	})
+
+	It("supports float32", func() {
+		f := float32(66.97)
+
+		err := client.Set("float_key", f, 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		val, err := client.Get("float_key").Float32()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal(f))
+	})
+
+	It("supports time.Time", func() {
+		tm := time.Date(2019, 01, 01, 0, 0, 0, 0, time.UTC)
+
+		err := client.Set("time_key", tm, 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		s, err := client.Get("time_key").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(s).To(Equal("2019-01-01T00:00:00Z"))
+
+		tm2, err := client.Get("time_key").Time()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(tm2).To(BeTemporally("==", tm))
+	})
+
+	It("allow to set custom error", func() {
+		e := errors.New("custom error")
+		cmd := redis.Cmd{}
+		cmd.SetErr(e)
+		_, err := cmd.Result()
+		Expect(err).To(Equal(e))
+	})
+})

+ 2628 - 0
commands.go

@@ -0,0 +1,2628 @@
+package redis
+
+import (
+	"errors"
+	"io"
+	"time"
+
+	"github.com/go-redis/redis/internal"
+)
+
+func usePrecise(dur time.Duration) bool {
+	return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(dur time.Duration) int64 {
+	if dur > 0 && dur < time.Millisecond {
+		internal.Logger.Printf(
+			"specified duration is %s, but minimal supported value is %s",
+			dur, time.Millisecond,
+		)
+	}
+	return int64(dur / time.Millisecond)
+}
+
+func formatSec(dur time.Duration) int64 {
+	if dur > 0 && dur < time.Second {
+		internal.Logger.Printf(
+			"specified duration is %s, but minimal supported value is %s",
+			dur, time.Second,
+		)
+	}
+	return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+	if len(src) == 1 {
+		switch v := src[0].(type) {
+		case []string:
+			for _, s := range v {
+				dst = append(dst, s)
+			}
+			return dst
+		case map[string]interface{}:
+			for k, v := range v {
+				dst = append(dst, k, v)
+			}
+			return dst
+		}
+	}
+
+	dst = append(dst, src...)
+	return dst
+}
+
+type Cmdable interface {
+	Pipeline() Pipeliner
+	Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
+
+	TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
+	TxPipeline() Pipeliner
+
+	Command() *CommandsInfoCmd
+	ClientGetName() *StringCmd
+	Echo(message interface{}) *StringCmd
+	Ping() *StatusCmd
+	Quit() *StatusCmd
+	Del(keys ...string) *IntCmd
+	Unlink(keys ...string) *IntCmd
+	Dump(key string) *StringCmd
+	Exists(keys ...string) *IntCmd
+	Expire(key string, expiration time.Duration) *BoolCmd
+	ExpireAt(key string, tm time.Time) *BoolCmd
+	Keys(pattern string) *StringSliceCmd
+	Migrate(host, port, key string, db int, timeout time.Duration) *StatusCmd
+	Move(key string, db int) *BoolCmd
+	ObjectRefCount(key string) *IntCmd
+	ObjectEncoding(key string) *StringCmd
+	ObjectIdleTime(key string) *DurationCmd
+	Persist(key string) *BoolCmd
+	PExpire(key string, expiration time.Duration) *BoolCmd
+	PExpireAt(key string, tm time.Time) *BoolCmd
+	PTTL(key string) *DurationCmd
+	RandomKey() *StringCmd
+	Rename(key, newkey string) *StatusCmd
+	RenameNX(key, newkey string) *BoolCmd
+	Restore(key string, ttl time.Duration, value string) *StatusCmd
+	RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
+	Sort(key string, sort *Sort) *StringSliceCmd
+	SortStore(key, store string, sort *Sort) *IntCmd
+	SortInterfaces(key string, sort *Sort) *SliceCmd
+	Touch(keys ...string) *IntCmd
+	TTL(key string) *DurationCmd
+	Type(key string) *StatusCmd
+	Scan(cursor uint64, match string, count int64) *ScanCmd
+	SScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	HScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	Append(key, value string) *IntCmd
+	BitCount(key string, bitCount *BitCount) *IntCmd
+	BitOpAnd(destKey string, keys ...string) *IntCmd
+	BitOpOr(destKey string, keys ...string) *IntCmd
+	BitOpXor(destKey string, keys ...string) *IntCmd
+	BitOpNot(destKey string, key string) *IntCmd
+	BitPos(key string, bit int64, pos ...int64) *IntCmd
+	BitField(key string, args ...interface{}) *IntSliceCmd
+	Decr(key string) *IntCmd
+	DecrBy(key string, decrement int64) *IntCmd
+	Get(key string) *StringCmd
+	GetBit(key string, offset int64) *IntCmd
+	GetRange(key string, start, end int64) *StringCmd
+	GetSet(key string, value interface{}) *StringCmd
+	Incr(key string) *IntCmd
+	IncrBy(key string, value int64) *IntCmd
+	IncrByFloat(key string, value float64) *FloatCmd
+	MGet(keys ...string) *SliceCmd
+	MSet(values ...interface{}) *StatusCmd
+	MSetNX(values ...interface{}) *BoolCmd
+	Set(key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetBit(key string, offset int64, value int) *IntCmd
+	SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetRange(key string, offset int64, value string) *IntCmd
+	StrLen(key string) *IntCmd
+	HDel(key string, fields ...string) *IntCmd
+	HExists(key, field string) *BoolCmd
+	HGet(key, field string) *StringCmd
+	HGetAll(key string) *StringStringMapCmd
+	HIncrBy(key, field string, incr int64) *IntCmd
+	HIncrByFloat(key, field string, incr float64) *FloatCmd
+	HKeys(key string) *StringSliceCmd
+	HLen(key string) *IntCmd
+	HMGet(key string, fields ...string) *SliceCmd
+	HMSet(key string, values ...interface{}) *IntCmd
+	HSet(key, field string, value interface{}) *BoolCmd
+	HSetNX(key, field string, value interface{}) *BoolCmd
+	HVals(key string) *StringSliceCmd
+	BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
+	LIndex(key string, index int64) *StringCmd
+	LInsert(key, op string, pivot, value interface{}) *IntCmd
+	LInsertBefore(key string, pivot, value interface{}) *IntCmd
+	LInsertAfter(key string, pivot, value interface{}) *IntCmd
+	LLen(key string) *IntCmd
+	LPop(key string) *StringCmd
+	LPush(key string, values ...interface{}) *IntCmd
+	LPushX(key string, values ...interface{}) *IntCmd
+	LRange(key string, start, stop int64) *StringSliceCmd
+	LRem(key string, count int64, value interface{}) *IntCmd
+	LSet(key string, index int64, value interface{}) *StatusCmd
+	LTrim(key string, start, stop int64) *StatusCmd
+	RPop(key string) *StringCmd
+	RPopLPush(source, destination string) *StringCmd
+	RPush(key string, values ...interface{}) *IntCmd
+	RPushX(key string, values ...interface{}) *IntCmd
+	SAdd(key string, members ...interface{}) *IntCmd
+	SCard(key string) *IntCmd
+	SDiff(keys ...string) *StringSliceCmd
+	SDiffStore(destination string, keys ...string) *IntCmd
+	SInter(keys ...string) *StringSliceCmd
+	SInterStore(destination string, keys ...string) *IntCmd
+	SIsMember(key string, member interface{}) *BoolCmd
+	SMembers(key string) *StringSliceCmd
+	SMembersMap(key string) *StringStructMapCmd
+	SMove(source, destination string, member interface{}) *BoolCmd
+	SPop(key string) *StringCmd
+	SPopN(key string, count int64) *StringSliceCmd
+	SRandMember(key string) *StringCmd
+	SRandMemberN(key string, count int64) *StringSliceCmd
+	SRem(key string, members ...interface{}) *IntCmd
+	SUnion(keys ...string) *StringSliceCmd
+	SUnionStore(destination string, keys ...string) *IntCmd
+	XAdd(a *XAddArgs) *StringCmd
+	XDel(stream string, ids ...string) *IntCmd
+	XLen(stream string) *IntCmd
+	XRange(stream, start, stop string) *XMessageSliceCmd
+	XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd
+	XRevRange(stream string, start, stop string) *XMessageSliceCmd
+	XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd
+	XRead(a *XReadArgs) *XStreamSliceCmd
+	XReadStreams(streams ...string) *XStreamSliceCmd
+	XGroupCreate(stream, group, start string) *StatusCmd
+	XGroupCreateMkStream(stream, group, start string) *StatusCmd
+	XGroupSetID(stream, group, start string) *StatusCmd
+	XGroupDestroy(stream, group string) *IntCmd
+	XGroupDelConsumer(stream, group, consumer string) *IntCmd
+	XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd
+	XAck(stream, group string, ids ...string) *IntCmd
+	XPending(stream, group string) *XPendingCmd
+	XPendingExt(a *XPendingExtArgs) *XPendingExtCmd
+	XClaim(a *XClaimArgs) *XMessageSliceCmd
+	XClaimJustID(a *XClaimArgs) *StringSliceCmd
+	XTrim(key string, maxLen int64) *IntCmd
+	XTrimApprox(key string, maxLen int64) *IntCmd
+	XInfoGroups(key string) *XInfoGroupsCmd
+	BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd
+	BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd
+	ZAdd(key string, members ...*Z) *IntCmd
+	ZAddNX(key string, members ...*Z) *IntCmd
+	ZAddXX(key string, members ...*Z) *IntCmd
+	ZAddCh(key string, members ...*Z) *IntCmd
+	ZAddNXCh(key string, members ...*Z) *IntCmd
+	ZAddXXCh(key string, members ...*Z) *IntCmd
+	ZIncr(key string, member *Z) *FloatCmd
+	ZIncrNX(key string, member *Z) *FloatCmd
+	ZIncrXX(key string, member *Z) *FloatCmd
+	ZCard(key string) *IntCmd
+	ZCount(key, min, max string) *IntCmd
+	ZLexCount(key, min, max string) *IntCmd
+	ZIncrBy(key string, increment float64, member string) *FloatCmd
+	ZInterStore(destination string, store *ZStore) *IntCmd
+	ZPopMax(key string, count ...int64) *ZSliceCmd
+	ZPopMin(key string, count ...int64) *ZSliceCmd
+	ZRange(key string, start, stop int64) *StringSliceCmd
+	ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
+	ZRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd
+	ZRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd
+	ZRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd
+	ZRank(key, member string) *IntCmd
+	ZRem(key string, members ...interface{}) *IntCmd
+	ZRemRangeByRank(key string, start, stop int64) *IntCmd
+	ZRemRangeByScore(key, min, max string) *IntCmd
+	ZRemRangeByLex(key, min, max string) *IntCmd
+	ZRevRange(key string, start, stop int64) *StringSliceCmd
+	ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
+	ZRevRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd
+	ZRevRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd
+	ZRevRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd
+	ZRevRank(key, member string) *IntCmd
+	ZScore(key, member string) *FloatCmd
+	ZUnionStore(dest string, store *ZStore) *IntCmd
+	PFAdd(key string, els ...interface{}) *IntCmd
+	PFCount(keys ...string) *IntCmd
+	PFMerge(dest string, keys ...string) *StatusCmd
+	BgRewriteAOF() *StatusCmd
+	BgSave() *StatusCmd
+	ClientKill(ipPort string) *StatusCmd
+	ClientKillByFilter(keys ...string) *IntCmd
+	ClientList() *StringCmd
+	ClientPause(dur time.Duration) *BoolCmd
+	ClientID() *IntCmd
+	ConfigGet(parameter string) *SliceCmd
+	ConfigResetStat() *StatusCmd
+	ConfigSet(parameter, value string) *StatusCmd
+	ConfigRewrite() *StatusCmd
+	DBSize() *IntCmd
+	FlushAll() *StatusCmd
+	FlushAllAsync() *StatusCmd
+	FlushDB() *StatusCmd
+	FlushDBAsync() *StatusCmd
+	Info(section ...string) *StringCmd
+	LastSave() *IntCmd
+	Save() *StatusCmd
+	Shutdown() *StatusCmd
+	ShutdownSave() *StatusCmd
+	ShutdownNoSave() *StatusCmd
+	SlaveOf(host, port string) *StatusCmd
+	Time() *TimeCmd
+	Eval(script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(hashes ...string) *BoolSliceCmd
+	ScriptFlush() *StatusCmd
+	ScriptKill() *StatusCmd
+	ScriptLoad(script string) *StringCmd
+	DebugObject(key string) *StringCmd
+	Publish(channel string, message interface{}) *IntCmd
+	PubSubChannels(pattern string) *StringSliceCmd
+	PubSubNumSub(channels ...string) *StringIntMapCmd
+	PubSubNumPat() *IntCmd
+	ClusterSlots() *ClusterSlotsCmd
+	ClusterNodes() *StringCmd
+	ClusterMeet(host, port string) *StatusCmd
+	ClusterForget(nodeID string) *StatusCmd
+	ClusterReplicate(nodeID string) *StatusCmd
+	ClusterResetSoft() *StatusCmd
+	ClusterResetHard() *StatusCmd
+	ClusterInfo() *StringCmd
+	ClusterKeySlot(key string) *IntCmd
+	ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd
+	ClusterCountFailureReports(nodeID string) *IntCmd
+	ClusterCountKeysInSlot(slot int) *IntCmd
+	ClusterDelSlots(slots ...int) *StatusCmd
+	ClusterDelSlotsRange(min, max int) *StatusCmd
+	ClusterSaveConfig() *StatusCmd
+	ClusterSlaves(nodeID string) *StringSliceCmd
+	ClusterFailover() *StatusCmd
+	ClusterAddSlots(slots ...int) *StatusCmd
+	ClusterAddSlotsRange(min, max int) *StatusCmd
+	GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
+	GeoPos(key string, members ...string) *GeoPosCmd
+	GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusStore(key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+	GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMemberStore(key, member string, query *GeoRadiusQuery) *IntCmd
+	GeoDist(key string, member1, member2, unit string) *FloatCmd
+	GeoHash(key string, members ...string) *StringSliceCmd
+	ReadOnly() *StatusCmd
+	ReadWrite() *StatusCmd
+	MemoryUsage(key string, samples ...int) *IntCmd
+}
+
+type StatefulCmdable interface {
+	Cmdable
+	Auth(password string) *StatusCmd
+	Select(index int) *StatusCmd
+	SwapDB(index1, index2 int) *StatusCmd
+	ClientSetName(name string) *BoolCmd
+}
+
+var _ Cmdable = (*Client)(nil)
+var _ Cmdable = (*Tx)(nil)
+var _ Cmdable = (*Ring)(nil)
+var _ Cmdable = (*ClusterClient)(nil)
+
+type cmdable func(cmd Cmder) error
+
+type statefulCmdable func(cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(password string) *StatusCmd {
+	cmd := NewStatusCmd("auth", password)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Echo(message interface{}) *StringCmd {
+	cmd := NewStringCmd("echo", message)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Ping() *StatusCmd {
+	cmd := NewStatusCmd("ping")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
+	cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Quit() *StatusCmd {
+	panic("not implemented")
+}
+
+func (c statefulCmdable) Select(index int) *StatusCmd {
+	cmd := NewStatusCmd("select", index)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c statefulCmdable) SwapDB(index1, index2 int) *StatusCmd {
+	cmd := NewStatusCmd("swapdb", index1, index2)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command() *CommandsInfoCmd {
+	cmd := NewCommandsInfoCmd("command")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Del(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "del"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Unlink(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unlink"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Dump(key string) *StringCmd {
+	cmd := NewStringCmd("dump", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Exists(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "exists"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("expire", key, formatSec(expiration))
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd("expireat", key, tm.Unix())
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Keys(pattern string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("keys", pattern)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Migrate(host, port, key string, db int, timeout time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(
+		"migrate",
+		host,
+		port,
+		key,
+		db,
+		formatMs(timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Move(key string, db int) *BoolCmd {
+	cmd := NewBoolCmd("move", key, db)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectRefCount(key string) *IntCmd {
+	cmd := NewIntCmd("object", "refcount", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectEncoding(key string) *StringCmd {
+	cmd := NewStringCmd("object", "encoding", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectIdleTime(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Second, "object", "idletime", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Persist(key string) *BoolCmd {
+	cmd := NewBoolCmd("persist", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd(
+		"pexpireat",
+		key,
+		tm.UnixNano()/int64(time.Millisecond),
+	)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PTTL(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Millisecond, "pttl", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) RandomKey() *StringCmd {
+	cmd := NewStringCmd("randomkey")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Rename(key, newkey string) *StatusCmd {
+	cmd := NewStatusCmd("rename", key, newkey)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) RenameNX(key, newkey string) *BoolCmd {
+	cmd := NewBoolCmd("renamenx", key, newkey)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		"restore",
+		key,
+		formatMs(ttl),
+		value,
+	)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		"restore",
+		key,
+		formatMs(ttl),
+		value,
+		"replace",
+	)
+	_ = c(cmd)
+	return cmd
+}
+
+type Sort struct {
+	By            string
+	Offset, Count int64
+	Get           []string
+	Order         string
+	Alpha         bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+	args := []interface{}{"sort", key}
+	if sort.By != "" {
+		args = append(args, "by", sort.By)
+	}
+	if sort.Offset != 0 || sort.Count != 0 {
+		args = append(args, "limit", sort.Offset, sort.Count)
+	}
+	for _, get := range sort.Get {
+		args = append(args, "get", get)
+	}
+	if sort.Order != "" {
+		args = append(args, sort.Order)
+	}
+	if sort.Alpha {
+		args = append(args, "alpha")
+	}
+	return args
+}
+
+func (c cmdable) Sort(key string, sort *Sort) *StringSliceCmd {
+	cmd := NewStringSliceCmd(sort.args(key)...)
+	_ = c(cmd)
+	return cmd
+}
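+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): sort the list "mylist" numerically and return at most 10 elements.
+//
+//	vals, err := rdb.Sort("mylist", &Sort{
+//		Offset: 0,
+//		Count:  10,
+//		Order:  "ASC",
+//	}).Result()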
+
+func (c cmdable) SortStore(key, store string, sort *Sort) *IntCmd {
+	args := sort.args(key)
+	if store != "" {
+		args = append(args, "store", store)
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd {
+	cmd := NewSliceCmd(sort.args(key)...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Touch(keys ...string) *IntCmd {
+	args := make([]interface{}, len(keys)+1)
+	args[0] = "touch"
+	for i, key := range keys {
+		args[i+1] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) TTL(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Second, "ttl", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Type(key string) *StatusCmd {
+	cmd := NewStatusCmd("type", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c, args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"sscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c, args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"hscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c, args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"zscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c, args...)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Append(key, value string) *IntCmd {
+	cmd := NewIntCmd("append", key, value)
+	_ = c(cmd)
+	return cmd
+}
+
+type BitCount struct {
+	Start, End int64
+}
+
+func (c cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
+	args := []interface{}{"bitcount", key}
+	if bitCount != nil {
+		args = append(
+			args,
+			bitCount.Start,
+			bitCount.End,
+		)
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
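+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): count the set bits in the first three bytes of "bitmap-key".
+//
+//	n, err := rdb.BitCount("bitmap-key", &BitCount{Start: 0, End: 2}).Result()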
+
+func (c cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "bitop"
+	args[1] = op
+	args[2] = destKey
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(destKey string, key string) *IntCmd {
+	return c.bitOp("not", destKey, key)
+}
+
+func (c cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
+	args := make([]interface{}, 3+len(pos))
+	args[0] = "bitpos"
+	args[1] = key
+	args[2] = bit
+	switch len(pos) {
+	case 0:
+	case 1:
+		args[3] = pos[0]
+	case 2:
+		args[3] = pos[0]
+		args[4] = pos[1]
+	default:
+		panic("too many arguments")
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) BitField(key string, args ...interface{}) *IntSliceCmd {
+	a := make([]interface{}, 0, 2+len(args))
+	a = append(a, "bitfield")
+	a = append(a, key)
+	a = append(a, args...)
+	cmd := NewIntSliceCmd(a...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Decr(key string) *IntCmd {
+	cmd := NewIntCmd("decr", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) DecrBy(key string, decrement int64) *IntCmd {
+	cmd := NewIntCmd("decrby", key, decrement)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
+func (c cmdable) Get(key string) *StringCmd {
+	cmd := NewStringCmd("get", key)
+	_ = c(cmd)
+	return cmd
+}
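+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): distinguish a missing key from other errors.
+//
+//	val, err := rdb.Get("key").Result()
+//	switch {
+//	case err == Nil:
+//		// key does not exist
+//	case err != nil:
+//		// network or protocol error
+//	default:
+//		_ = val // use the value
+//	}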
+
+func (c cmdable) GetBit(key string, offset int64) *IntCmd {
+	cmd := NewIntCmd("getbit", key, offset)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) GetRange(key string, start, end int64) *StringCmd {
+	cmd := NewStringCmd("getrange", key, start, end)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) GetSet(key string, value interface{}) *StringCmd {
+	cmd := NewStringCmd("getset", key, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Incr(key string) *IntCmd {
+	cmd := NewIntCmd("incr", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) IncrBy(key string, value int64) *IntCmd {
+	cmd := NewIntCmd("incrby", key, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) IncrByFloat(key string, value float64) *FloatCmd {
+	cmd := NewFloatCmd("incrbyfloat", key, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) MGet(keys ...string) *SliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "mget"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+//   - MSet("key1", "value1", "key2", "value2")
+//   - MSet([]string{"key1", "value1", "key2", "value2"})
+//   - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(values ...interface{}) *StatusCmd {
+	args := make([]interface{}, 1, 1+len(values))
+	args[0] = "mset"
+	args = appendArgs(args, values)
+	cmd := NewStatusCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+//   - MSetNX("key1", "value1", "key2", "value2")
+//   - MSetNX([]string{"key1", "value1", "key2", "value2"})
+//   - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(values ...interface{}) *BoolCmd {
+	args := make([]interface{}, 1, 1+len(values))
+	args[0] = "msetnx"
+	args = appendArgs(args, values)
+	cmd := NewBoolCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+//
+// Use expiration for `SETEX`-like behavior.
+// Zero expiration means the key has no expiration time.
+func (c cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd {
+	args := make([]interface{}, 3, 5)
+	args[0] = "set"
+	args[1] = key
+	args[2] = value
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(expiration))
+		} else {
+			args = append(args, "ex", formatSec(expiration))
+		}
+	}
+	cmd := NewStatusCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
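+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): a sub-second expiration is sent as PX, a whole-second one as EX.
+//
+//	err := rdb.Set("session", "token", 500*time.Millisecond).Err() // SET session token px 500
+//	err = rdb.Set("session", "token", 10*time.Second).Err()        // SET session token ex 10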
+
+func (c cmdable) SetBit(key string, offset int64, value int) *IntCmd {
+	cmd := NewIntCmd(
+		"setbit",
+		key,
+		offset,
+		value,
+	)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	if expiration == 0 {
+		// Use old `SETNX` to support old Redis versions.
+		cmd = NewBoolCmd("setnx", key, value)
+	} else {
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx")
+		} else {
+			cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx")
+		}
+	}
+	_ = c(cmd)
+	return cmd
+}
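+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): SetNX as a simple lock; ok is false when the key already exists.
+//
+//	ok, err := rdb.SetNX("lock:job", "owner-id", 30*time.Second).Result()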
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	if expiration == 0 {
+		cmd = NewBoolCmd("set", key, value, "xx")
+	} else {
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx")
+		} else {
+			cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx")
+		}
+	}
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SetRange(key string, offset int64, value string) *IntCmd {
+	cmd := NewIntCmd("setrange", key, offset, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) StrLen(key string) *IntCmd {
+	cmd := NewIntCmd("strlen", key)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(key string, fields ...string) *IntCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hdel"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HExists(key, field string) *BoolCmd {
+	cmd := NewBoolCmd("hexists", key, field)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HGet(key, field string) *StringCmd {
+	cmd := NewStringCmd("hget", key, field)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HGetAll(key string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd("hgetall", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrBy(key, field string, incr int64) *IntCmd {
+	cmd := NewIntCmd("hincrby", key, field, incr)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+	cmd := NewFloatCmd("hincrbyfloat", key, field, incr)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HKeys(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("hkeys", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HLen(key string) *IntCmd {
+	cmd := NewIntCmd("hlen", key)
+	_ = c(cmd)
+	return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// The values are returned as interface{} so that an empty string can be
+// distinguished from a nil (missing field) value.
+func (c cmdable) HMGet(key string, fields ...string) *SliceCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hmget"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
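+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): an existing field comes back as a string, a missing one as nil.
+//
+//	vals, err := rdb.HMGet("myhash", "field1", "missing").Result()
+//	// vals[0] is a string, vals[1] == nil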
+
+// HMSet is like HSet, but accepts multiple values:
+//   - HMSet("myhash", "key1", "value1", "key2", "value2")
+//   - HMSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//   - HMSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that it uses the HSET Redis command underneath because HMSET is deprecated.
+func (c cmdable) HMSet(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "hset"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HSet(key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd("hset", key, field, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HSetNX(key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd("hsetnx", key, field, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) HVals(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("hvals", key)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "blpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(timeout)
+	cmd := NewStringSliceCmd(args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "brpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(keys)+1] = formatSec(timeout)
+	cmd := NewStringSliceCmd(args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd {
+	cmd := NewStringCmd(
+		"brpoplpush",
+		source,
+		destination,
+		formatSec(timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LIndex(key string, index int64) *StringCmd {
+	cmd := NewStringCmd("lindex", key, index)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, op, pivot, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, "before", pivot, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, "after", pivot, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LLen(key string) *IntCmd {
+	cmd := NewIntCmd("llen", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LPop(key string) *StringCmd {
+	cmd := NewStringCmd("lpop", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LPush(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LPushX(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LRange(key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(
+		"lrange",
+		key,
+		start,
+		stop,
+	)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LRem(key string, count int64, value interface{}) *IntCmd {
+	cmd := NewIntCmd("lrem", key, count, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LSet(key string, index int64, value interface{}) *StatusCmd {
+	cmd := NewStatusCmd("lset", key, index, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LTrim(key string, start, stop int64) *StatusCmd {
+	cmd := NewStatusCmd(
+		"ltrim",
+		key,
+		start,
+		stop,
+	)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) RPop(key string) *StringCmd {
+	cmd := NewStringCmd("rpop", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) RPopLPush(source, destination string) *StringCmd {
+	cmd := NewStringCmd("rpoplpush", source, destination)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) RPush(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) RPushX(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "sadd"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SCard(key string) *IntCmd {
+	cmd := NewIntCmd("scard", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SDiff(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sdiff"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SDiffStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sdiffstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SInter(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sinter"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SInterStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sinterstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SIsMember(key string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd("sismember", key, member)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice
+func (c cmdable) SMembers(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("smembers", key)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map
+func (c cmdable) SMembersMap(key string) *StringStructMapCmd {
+	cmd := NewStringStructMapCmd("smembers", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd("smove", source, destination, member)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SPOP key` command.
+func (c cmdable) SPop(key string) *StringCmd {
+	cmd := NewStringCmd("spop", key)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c cmdable) SPopN(key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("spop", key, count)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(key string) *StringCmd {
+	cmd := NewStringCmd("srandmember", key)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("srandmember", key, count)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SRem(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "srem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SUnion(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sunion"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sunionstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type XAddArgs struct {
+	Stream       string
+	MaxLen       int64 // MAXLEN N
+	MaxLenApprox int64 // MAXLEN ~ N
+	ID           string
+	Values       map[string]interface{}
+}
+
+func (c cmdable) XAdd(a *XAddArgs) *StringCmd {
+	args := make([]interface{}, 0, 6+len(a.Values)*2)
+	args = append(args, "xadd")
+	args = append(args, a.Stream)
+	if a.MaxLen > 0 {
+		args = append(args, "maxlen", a.MaxLen)
+	} else if a.MaxLenApprox > 0 {
+		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	}
+	if a.ID != "" {
+		args = append(args, a.ID)
+	} else {
+		args = append(args, "*")
+	}
+	for k, v := range a.Values {
+		args = append(args, k)
+		args = append(args, v)
+	}
+
+	cmd := NewStringCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
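+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): append an entry with an auto-generated ID ("*"), trimming the
+// stream to roughly 1000 entries.
+//
+//	id, err := rdb.XAdd(&XAddArgs{
+//		Stream:       "events",
+//		MaxLenApprox: 1000,
+//		Values:       map[string]interface{}{"type": "click", "user": "42"},
+//	}).Result()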
+
+func (c cmdable) XDel(stream string, ids ...string) *IntCmd {
+	args := []interface{}{"xdel", stream}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XLen(stream string) *IntCmd {
+	cmd := NewIntCmd("xlen", stream)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XRange(stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrange", stream, start, stop)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count)
+	_ = c(cmd)
+	return cmd
+}
+
+type XReadArgs struct {
+	Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count   int64
+	Block   time.Duration
+}
+
+func (c cmdable) XRead(a *XReadArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 5+len(a.Streams))
+	args = append(args, "xread")
+	if a.Count > 0 {
+		args = append(args, "count")
+		args = append(args, a.Count)
+	}
+	if a.Block >= 0 {
+		args = append(args, "block")
+		args = append(args, int64(a.Block/time.Millisecond))
+	}
+
+	args = append(args, "streams")
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	_ = c(cmd)
+	return cmd
+}
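+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): Streams lists all stream names first, then one ID per stream.
+//
+//	streams, err := rdb.XRead(&XReadArgs{
+//		Streams: []string{"events", "alerts", "0", "0"},
+//		Count:   10,
+//		Block:   time.Second,
+//	}).Result()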
+
+func (c cmdable) XReadStreams(streams ...string) *XStreamSliceCmd {
+	return c.XRead(&XReadArgs{
+		Streams: streams,
+		Block:   -1,
+	})
+}
+
+func (c cmdable) XGroupCreate(stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd("xgroup", "create", stream, group, start)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd("xgroup", "create", stream, group, start, "mkstream")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupSetID(stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd("xgroup", "setid", stream, group, start)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDestroy(stream, group string) *IntCmd {
+	cmd := NewIntCmd("xgroup", "destroy", stream, group)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer)
+	_ = c(cmd)
+	return cmd
+}
+
+type XReadGroupArgs struct {
+	Group    string
+	Consumer string
+	Streams  []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count    int64
+	Block    time.Duration
+	NoAck    bool
+}
+
+func (c cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 8+len(a.Streams))
+	args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+	}
+	if a.Block >= 0 {
+		args = append(args, "block", int64(a.Block/time.Millisecond))
+	}
+	if a.NoAck {
+		args = append(args, "noack")
+	}
+	args = append(args, "streams")
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	_ = c(cmd)
+	return cmd
+}
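+
+// Illustrative usage sketch (not part of the original commit; rdb is an assumed
+// *Client): read new messages (">") for consumer "c1" in group "g1".
+//
+//	streams, err := rdb.XReadGroup(&XReadGroupArgs{
+//		Group:    "g1",
+//		Consumer: "c1",
+//		Streams:  []string{"events", ">"},
+//		Count:    10,
+//		Block:    time.Second,
+//	}).Result()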
+
+func (c cmdable) XAck(stream, group string, ids ...string) *IntCmd {
+	args := []interface{}{"xack", stream, group}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XPending(stream, group string) *XPendingCmd {
+	cmd := NewXPendingCmd("xpending", stream, group)
+	_ = c(cmd)
+	return cmd
+}
+
+type XPendingExtArgs struct {
+	Stream   string
+	Group    string
+	Start    string
+	End      string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd {
+	args := make([]interface{}, 0, 7)
+	args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+	if a.Consumer != "" {
+		args = append(args, a.Consumer)
+	}
+	cmd := NewXPendingExtCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+type XClaimArgs struct {
+	Stream   string
+	Group    string
+	Consumer string
+	MinIdle  time.Duration
+	Messages []string
+}
+
+func (c cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd {
+	args := xClaimArgs(a)
+	cmd := NewXMessageSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd {
+	args := xClaimArgs(a)
+	args = append(args, "justid")
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 4+len(a.Messages))
+	args = append(args,
+		"xclaim",
+		a.Stream,
+		a.Group, a.Consumer,
+		int64(a.MinIdle/time.Millisecond))
+	for _, id := range a.Messages {
+		args = append(args, id)
+	}
+	return args
+}
+
+func (c cmdable) XTrim(key string, maxLen int64) *IntCmd {
+	cmd := NewIntCmd("xtrim", key, "maxlen", maxLen)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XTrimApprox(key string, maxLen int64) *IntCmd {
+	cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoGroups(key string) *XInfoGroupsCmd {
+	cmd := NewXInfoGroupsCmd(key)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+	Score  float64
+	Member interface{}
+}
+
+// ZWithKey represents a sorted set member, including the name of the key it was popped from.
+type ZWithKey struct {
+	Z
+	Key string
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+	Keys    []string
+	Weights []float64
+	// Can be SUM, MIN or MAX.
+	Aggregate string
+}
+
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmax"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(timeout)
+	cmd := NewZWithKeyCmd(args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmin"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(timeout)
+	cmd := NewZWithKeyCmd(args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) zAdd(a []interface{}, n int, members ...*Z) *IntCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewIntCmd(a...)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(key string, members ...*Z) *IntCmd {
+	const n = 2
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1] = "zadd", key
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(key string, members ...*Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "nx"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(key string, members ...*Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "xx"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c cmdable) ZAddCh(key string, members ...*Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "ch"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c cmdable) ZAddNXCh(key string, members ...*Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c cmdable) ZAddXXCh(key string, members ...*Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+	return c.zAdd(a, n, members...)
+}
+
+func (c cmdable) zIncr(a []interface{}, n int, members ...*Z) *FloatCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewFloatCmd(a...)
+	_ = c(cmd)
+	return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c cmdable) ZIncr(key string, member *Z) *FloatCmd {
+	const n = 3
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2] = "zadd", key, "incr"
+	return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c cmdable) ZIncrNX(key string, member *Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+	return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c cmdable) ZIncrXX(key string, member *Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+	return c.zIncr(a, n, member)
+}
+
+func (c cmdable) ZCard(key string) *IntCmd {
+	cmd := NewIntCmd("zcard", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZCount(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zcount", key, min, max)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZLexCount(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zlexcount", key, min, max)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+	cmd := NewFloatCmd("zincrby", key, increment, member)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterStore(destination string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 3+len(store.Keys))
+	args[0] = "zinterstore"
+	args[1] = destination
+	args[2] = len(store.Keys)
+	for i, key := range store.Keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmax",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmin",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+	args := []interface{}{
+		"zrange",
+		key,
+		start,
+		stop,
+	}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRange(key string, start, stop int64) *StringSliceCmd {
+	return c.zRange(key, start, stop, false)
+}
+
+func (c cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd("zrange", key, start, stop, "withscores")
+	_ = c(cmd)
+	return cmd
+}
+
+type ZRangeBy struct {
+	Min, Max      string
+	Offset, Count int64
+}
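+
+// A hedged usage sketch: Min and Max take Redis range syntax ("-inf", "+inf",
+// "(5" for an exclusive bound), while Offset and Count map to the LIMIT
+// clause; rdb stands in for a *Client.
+//
+//	vals := rdb.ZRangeByScore("leaderboard", &ZRangeBy{
+//		Min:   "(10",
+//		Max:   "+inf",
+//		Count: 5,
+//	})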
+
+func (c cmdable) zRangeBy(zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Min, opt.Max}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy("zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy("zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRank(key, member string) *IntCmd {
+	cmd := NewIntCmd("zrank", key, member)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRem(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "zrem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+	cmd := NewIntCmd(
+		"zremrangebyrank",
+		key,
+		start,
+		stop,
+	)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zremrangebyscore", key, min, max)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zremrangebylex", key, min, max)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("zrevrange", key, start, stop)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) zRevRangeBy(zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Max, opt.Min}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy("zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy("zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRank(key, member string) *IntCmd {
+	cmd := NewIntCmd("zrevrank", key, member)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZScore(key, member string) *FloatCmd {
+	cmd := NewFloatCmd("zscore", key, member)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionStore(dest string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 3+len(store.Keys))
+	args[0] = "zunionstore"
+	args[1] = dest
+	args[2] = len(store.Keys)
+	for i, key := range store.Keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	args = appendArgs(args, els)
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PFCount(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PFMerge(dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
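+
+// A hedged usage sketch for the HyperLogLog commands above (rdb stands in for
+// a *Client): approximate the number of unique visitors across two days.
+//
+//	rdb.PFAdd("visitors:day1", "a", "b", "c")
+//	rdb.PFAdd("visitors:day2", "b", "c", "d")
+//	rdb.PFMerge("visitors:total", "visitors:day1", "visitors:day2")
+//	total := rdb.PFCount("visitors:total") // approximately 4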
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF() *StatusCmd {
+	cmd := NewStatusCmd("bgrewriteaof")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) BgSave() *StatusCmd {
+	cmd := NewStatusCmd("bgsave")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClientKill(ipPort string) *StatusCmd {
+	cmd := NewStatusCmd("client", "kill", ipPort)
+	_ = c(cmd)
+	return cmd
+}
+
+// ClientKillByFilter uses the new-style filter syntax, while ClientKill uses the old one:
+//   CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "client"
+	args[1] = "kill"
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
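+
+// A hedged usage sketch: filters are passed as option/value pairs, for example
+// killing every connection from a given address (rdb stands in for a *Client).
+//
+//	killed := rdb.ClientKillByFilter("ADDR", "10.0.0.1:6379")
+//	// killed.Val() is the number of connections closed.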
+
+func (c cmdable) ClientList() *StringCmd {
+	cmd := NewStringCmd("client", "list")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClientPause(dur time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("client", "pause", formatMs(dur))
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClientID() *IntCmd {
+	cmd := NewIntCmd("client", "id")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClientUnblock(id int64) *IntCmd {
+	cmd := NewIntCmd("client", "unblock", id)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(id int64) *IntCmd {
+	cmd := NewIntCmd("client", "unblock", id, "error")
+	_ = c(cmd)
+	return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(name string) *BoolCmd {
+	cmd := NewBoolCmd("client", "setname", name)
+	_ = c(cmd)
+	return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName() *StringCmd {
+	cmd := NewStringCmd("client", "getname")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigGet(parameter string) *SliceCmd {
+	cmd := NewSliceCmd("config", "get", parameter)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigResetStat() *StatusCmd {
+	cmd := NewStatusCmd("config", "resetstat")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigSet(parameter, value string) *StatusCmd {
+	cmd := NewStatusCmd("config", "set", parameter, value)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigRewrite() *StatusCmd {
+	cmd := NewStatusCmd("config", "rewrite")
+	_ = c(cmd)
+	return cmd
+}
+
+// Deprecated: Use DBSize instead.
+func (c cmdable) DbSize() *IntCmd {
+	return c.DBSize()
+}
+
+func (c cmdable) DBSize() *IntCmd {
+	cmd := NewIntCmd("dbsize")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) FlushAll() *StatusCmd {
+	cmd := NewStatusCmd("flushall")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) FlushAllAsync() *StatusCmd {
+	cmd := NewStatusCmd("flushall", "async")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) FlushDB() *StatusCmd {
+	cmd := NewStatusCmd("flushdb")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) FlushDBAsync() *StatusCmd {
+	cmd := NewStatusCmd("flushdb", "async")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Info(section ...string) *StringCmd {
+	args := []interface{}{"info"}
+	if len(section) > 0 {
+		args = append(args, section[0])
+	}
+	cmd := NewStringCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) LastSave() *IntCmd {
+	cmd := NewIntCmd("lastsave")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) Save() *StatusCmd {
+	cmd := NewStatusCmd("save")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) shutdown(modifier string) *StatusCmd {
+	var args []interface{}
+	if modifier == "" {
+		args = []interface{}{"shutdown"}
+	} else {
+		args = []interface{}{"shutdown", modifier}
+	}
+	cmd := NewStatusCmd(args...)
+	_ = c(cmd)
+	if err := cmd.Err(); err != nil {
+		if err == io.EOF {
+			// Server quit as expected.
+			cmd.err = nil
+		}
+	} else {
+		// Server did not quit. String reply contains the reason.
+		cmd.err = errors.New(cmd.val)
+		cmd.val = ""
+	}
+	return cmd
+}
+
+func (c cmdable) Shutdown() *StatusCmd {
+	return c.shutdown("")
+}
+
+func (c cmdable) ShutdownSave() *StatusCmd {
+	return c.shutdown("save")
+}
+
+func (c cmdable) ShutdownNoSave() *StatusCmd {
+	return c.shutdown("nosave")
+}
+
+func (c cmdable) SlaveOf(host, port string) *StatusCmd {
+	cmd := NewStatusCmd("slaveof", host, port)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) SlowLog() {
+	panic("not implemented")
+}
+
+func (c cmdable) Sync() {
+	panic("not implemented")
+}
+
+func (c cmdable) Time() *TimeCmd {
+	cmd := NewTimeCmd("time")
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(script string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+	cmdArgs[0] = "eval"
+	cmdArgs[1] = script
+	cmdArgs[2] = len(keys)
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	cmdArgs = appendArgs(cmdArgs, args)
+	cmd := NewCmd(cmdArgs...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+	cmdArgs[0] = "evalsha"
+	cmdArgs[1] = sha1
+	cmdArgs[2] = len(keys)
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	cmdArgs = appendArgs(cmdArgs, args)
+	cmd := NewCmd(cmdArgs...)
+	_ = c(cmd)
+	return cmd
+}
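+
+// A hedged usage sketch: run a small Lua script that echoes its first key and
+// first argument (rdb stands in for a *Client).
+//
+//	res := rdb.Eval("return {KEYS[1], ARGV[1]}", []string{"key1"}, "hello")
+//	// res.Val() is []interface{}{"key1", "hello"}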
+
+func (c cmdable) ScriptExists(hashes ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(hashes))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, hash := range hashes {
+		args[2+i] = hash
+	}
+	cmd := NewBoolSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptFlush() *StatusCmd {
+	cmd := NewStatusCmd("script", "flush")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptKill() *StatusCmd {
+	cmd := NewStatusCmd("script", "kill")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptLoad(script string) *StringCmd {
+	cmd := NewStringCmd("script", "load", script)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) DebugObject(key string) *StringCmd {
+	cmd := NewStringCmd("debug", "object", key)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(channel string, message interface{}) *IntCmd {
+	cmd := NewIntCmd("publish", channel, message)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubChannels(pattern string) *StringSliceCmd {
+	args := []interface{}{"pubsub", "channels"}
+	if pattern != "*" {
+		args = append(args, pattern)
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubNumSub(channels ...string) *StringIntMapCmd {
+	args := make([]interface{}, 2+len(channels))
+	args[0] = "pubsub"
+	args[1] = "numsub"
+	for i, channel := range channels {
+		args[2+i] = channel
+	}
+	cmd := NewStringIntMapCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubNumPat() *IntCmd {
+	cmd := NewIntCmd("pubsub", "numpat")
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots() *ClusterSlotsCmd {
+	cmd := NewClusterSlotsCmd("cluster", "slots")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterNodes() *StringCmd {
+	cmd := NewStringCmd("cluster", "nodes")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterMeet(host, port string) *StatusCmd {
+	cmd := NewStatusCmd("cluster", "meet", host, port)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterForget(nodeID string) *StatusCmd {
+	cmd := NewStatusCmd("cluster", "forget", nodeID)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterReplicate(nodeID string) *StatusCmd {
+	cmd := NewStatusCmd("cluster", "replicate", nodeID)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterResetSoft() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "reset", "soft")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterResetHard() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "reset", "hard")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterInfo() *StringCmd {
+	cmd := NewStringCmd("cluster", "info")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterKeySlot(key string) *IntCmd {
+	cmd := NewIntCmd("cluster", "keyslot", key)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd("cluster", "getkeysinslot", slot, count)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(nodeID string) *IntCmd {
+	cmd := NewIntCmd("cluster", "count-failure-reports", nodeID)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(slot int) *IntCmd {
+	cmd := NewIntCmd("cluster", "countkeysinslot", slot)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterDelSlots(slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "delslots"
+	for i, slot := range slots {
+		args[2+i] = slot
+	}
+	cmd := NewStatusCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterDelSlots(slots...)
+}
+
+func (c cmdable) ClusterSaveConfig() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "saveconfig")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterSlaves(nodeID string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("cluster", "slaves", nodeID)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ReadOnly() *StatusCmd {
+	cmd := NewStatusCmd("readonly")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ReadWrite() *StatusCmd {
+	cmd := NewStatusCmd("readwrite")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterFailover() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "failover")
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterAddSlots(slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "addslots"
+	for i, num := range slots {
+		args[2+i] = num
+	}
+	cmd := NewStatusCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterAddSlots(slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd {
+	args := make([]interface{}, 2+3*len(geoLocation))
+	args[0] = "geoadd"
+	args[1] = key
+	for i, eachLoc := range geoLocation {
+		args[2+3*i] = eachLoc.Longitude
+		args[2+3*i+1] = eachLoc.Latitude
+		args[2+3*i+2] = eachLoc.Name
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
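+
+// A hedged usage sketch built on the GeoLocation fields consumed above (rdb
+// stands in for a *Client); the coordinates are the classic Redis GEO example.
+//
+//	rdb.GeoAdd("Sicily",
+//		&GeoLocation{Name: "Palermo", Longitude: 13.361389, Latitude: 38.115556},
+//		&GeoLocation{Name: "Catania", Longitude: 15.087269, Latitude: 37.502669},
+//	)
+//	dist := rdb.GeoDist("Sicily", "Palermo", "Catania", "km")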
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(query, "georadius_ro", key, longitude, latitude)
+	if query.Store != "" || query.StoreDist != "" {
+		cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+		return cmd
+	}
+	_ = c(cmd)
+	return cmd
+}
+
+// GeoRadiusStore is the write variant of the GEORADIUS command.
+func (c cmdable) GeoRadiusStore(key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd {
+	args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+	cmd := NewIntCmd(args...)
+	if query.Store == "" && query.StoreDist == "" {
+		cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+		return cmd
+	}
+	_ = c(cmd)
+	return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(query, "georadiusbymember_ro", key, member)
+	if query.Store != "" || query.StoreDist != "" {
+		cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+		return cmd
+	}
+	_ = c(cmd)
+	return cmd
+}
+
+// GeoRadiusByMemberStore is the write variant of the GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(key, member string, query *GeoRadiusQuery) *IntCmd {
+	args := geoLocationArgs(query, "georadiusbymember", key, member)
+	cmd := NewIntCmd(args...)
+	if query.Store == "" && query.StoreDist == "" {
+		cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+		return cmd
+	}
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) GeoDist(key string, member1, member2, unit string) *FloatCmd {
+	if unit == "" {
+		unit = "km"
+	}
+	cmd := NewFloatCmd("geodist", key, member1, member2, unit)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) GeoHash(key string, members ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geohash"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewStringSliceCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+func (c cmdable) GeoPos(key string, members ...string) *GeoPosCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geopos"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewGeoPosCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) MemoryUsage(key string, samples ...int) *IntCmd {
+	args := []interface{}{"memory", "usage", key}
+	if len(samples) > 0 {
+		if len(samples) != 1 {
+			panic("MemoryUsage expects single sample count")
+		}
+		args = append(args, "SAMPLES", samples[0])
+	}
+	cmd := NewIntCmd(args...)
+	_ = c(cmd)
+	return cmd
+}
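+
+// A hedged usage sketch: the optional single value maps to the SAMPLES option
+// (rdb stands in for a *Client).
+//
+//	n := rdb.MemoryUsage("key")   // MEMORY USAGE key
+//	n = rdb.MemoryUsage("key", 5) // MEMORY USAGE key SAMPLES 5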

+ 4042 - 0
commands_test.go

@@ -0,0 +1,4042 @@
+package redis_test
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"github.com/go-redis/redis/v7"
+	"github.com/go-redis/redis/v7/internal/proto"
+)
+
+var _ = Describe("Commands", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		client = redis.NewClient(redisOptions())
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	Describe("server", func() {
+
+		It("should Auth", func() {
+			cmds, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+				pipe.Auth("password")
+				pipe.Auth("")
+				return nil
+			})
+			Expect(err).To(MatchError("ERR Client sent AUTH, but no password is set"))
+			Expect(cmds[0].Err()).To(MatchError("ERR Client sent AUTH, but no password is set"))
+			Expect(cmds[1].Err()).To(MatchError("ERR Client sent AUTH, but no password is set"))
+
+			stats := client.PoolStats()
+			Expect(stats.Hits).To(Equal(uint32(1)))
+			Expect(stats.Misses).To(Equal(uint32(1)))
+			Expect(stats.Timeouts).To(Equal(uint32(0)))
+			Expect(stats.TotalConns).To(Equal(uint32(1)))
+			Expect(stats.IdleConns).To(Equal(uint32(1)))
+		})
+
+		It("should Echo", func() {
+			pipe := client.Pipeline()
+			echo := pipe.Echo("hello")
+			_, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(echo.Err()).NotTo(HaveOccurred())
+			Expect(echo.Val()).To(Equal("hello"))
+		})
+
+		It("should Ping", func() {
+			ping := client.Ping()
+			Expect(ping.Err()).NotTo(HaveOccurred())
+			Expect(ping.Val()).To(Equal("PONG"))
+		})
+
+		It("should Wait", func() {
+			const wait = 3 * time.Second
+
+			// assume testing on a single redis instance
+			start := time.Now()
+			val, err := client.Wait(1, wait).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal(int64(0)))
+			Expect(time.Now()).To(BeTemporally("~", start.Add(wait), 3*time.Second))
+		})
+
+		It("should Select", func() {
+			pipe := client.Pipeline()
+			sel := pipe.Select(1)
+			_, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(sel.Err()).NotTo(HaveOccurred())
+			Expect(sel.Val()).To(Equal("OK"))
+		})
+
+		It("should SwapDB", func() {
+			pipe := client.Pipeline()
+			sel := pipe.SwapDB(1, 2)
+			_, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(sel.Err()).NotTo(HaveOccurred())
+			Expect(sel.Val()).To(Equal("OK"))
+		})
+
+		It("should BgRewriteAOF", func() {
+			Skip("flaky test")
+
+			val, err := client.BgRewriteAOF().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(ContainSubstring("Background append only file rewriting"))
+		})
+
+		It("should BgSave", func() {
+			Skip("flaky test")
+
+			// workaround for "ERR Can't BGSAVE while AOF log rewriting is in progress"
+			Eventually(func() string {
+				return client.BgSave().Val()
+			}, "30s").Should(Equal("Background saving started"))
+		})
+
+		It("should ClientKill", func() {
+			r := client.ClientKill("1.1.1.1:1111")
+			Expect(r.Err()).To(MatchError("ERR No such client"))
+			Expect(r.Val()).To(Equal(""))
+		})
+
+		It("should ClientKillByFilter", func() {
+			r := client.ClientKillByFilter("TYPE", "test")
+			Expect(r.Err()).To(MatchError("ERR Unknown client type 'test'"))
+			Expect(r.Val()).To(Equal(int64(0)))
+		})
+
+		It("should ClientID", func() {
+			err := client.ClientID().Err()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(client.ClientID().Val()).To(BeNumerically(">=", 0))
+		})
+
+		It("should ClientUnblock", func() {
+			id := client.ClientID().Val()
+			r, err := client.ClientUnblock(id).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(r).To(Equal(int64(0)))
+		})
+
+		It("should ClientUnblockWithError", func() {
+			id := client.ClientID().Val()
+			r, err := client.ClientUnblockWithError(id).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(r).To(Equal(int64(0)))
+		})
+
+		It("should ClientPause", func() {
+			err := client.ClientPause(time.Second).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			start := time.Now()
+			err = client.Ping().Err()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(time.Now()).To(BeTemporally("~", start.Add(time.Second), 800*time.Millisecond))
+		})
+
+		It("should ClientSetName and ClientGetName", func() {
+			pipe := client.Pipeline()
+			set := pipe.ClientSetName("theclientname")
+			get := pipe.ClientGetName()
+			_, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(BeTrue())
+
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("theclientname"))
+		})
+
+		It("should ConfigGet", func() {
+			val, err := client.ConfigGet("*").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).NotTo(BeEmpty())
+		})
+
+		It("should ConfigResetStat", func() {
+			r := client.ConfigResetStat()
+			Expect(r.Err()).NotTo(HaveOccurred())
+			Expect(r.Val()).To(Equal("OK"))
+		})
+
+		It("should ConfigSet", func() {
+			configGet := client.ConfigGet("maxmemory")
+			Expect(configGet.Err()).NotTo(HaveOccurred())
+			Expect(configGet.Val()).To(HaveLen(2))
+			Expect(configGet.Val()[0]).To(Equal("maxmemory"))
+
+			configSet := client.ConfigSet("maxmemory", configGet.Val()[1].(string))
+			Expect(configSet.Err()).NotTo(HaveOccurred())
+			Expect(configSet.Val()).To(Equal("OK"))
+		})
+
+		It("should ConfigRewrite", func() {
+			configRewrite := client.ConfigRewrite()
+			Expect(configRewrite.Err()).NotTo(HaveOccurred())
+			Expect(configRewrite.Val()).To(Equal("OK"))
+		})
+
+		It("should DBSize", func() {
+			size, err := client.DBSize().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(0)))
+		})
+
+		It("should Info", func() {
+			info := client.Info()
+			Expect(info.Err()).NotTo(HaveOccurred())
+			Expect(info.Val()).NotTo(Equal(""))
+		})
+
+		It("should Info cpu", func() {
+			info := client.Info("cpu")
+			Expect(info.Err()).NotTo(HaveOccurred())
+			Expect(info.Val()).NotTo(Equal(""))
+			Expect(info.Val()).To(ContainSubstring(`used_cpu_sys`))
+		})
+
+		It("should LastSave", func() {
+			lastSave := client.LastSave()
+			Expect(lastSave.Err()).NotTo(HaveOccurred())
+			Expect(lastSave.Val()).NotTo(Equal(0))
+		})
+
+		It("should Save", func() {
+			// workaround for "ERR Background save already in progress"
+			Eventually(func() string {
+				return client.Save().Val()
+			}, "10s").Should(Equal("OK"))
+		})
+
+		It("should SlaveOf", func() {
+			slaveOf := client.SlaveOf("localhost", "8888")
+			Expect(slaveOf.Err()).NotTo(HaveOccurred())
+			Expect(slaveOf.Val()).To(Equal("OK"))
+
+			slaveOf = client.SlaveOf("NO", "ONE")
+			Expect(slaveOf.Err()).NotTo(HaveOccurred())
+			Expect(slaveOf.Val()).To(Equal("OK"))
+		})
+
+		It("should Time", func() {
+			tm, err := client.Time().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(tm).To(BeTemporally("~", time.Now(), 3*time.Second))
+		})
+
+		It("should Command", func() {
+			cmds, err := client.Command().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(len(cmds)).To(BeNumerically("~", 200, 20))
+
+			cmd := cmds["mget"]
+			Expect(cmd.Name).To(Equal("mget"))
+			Expect(cmd.Arity).To(Equal(int8(-2)))
+			Expect(cmd.Flags).To(ContainElement("readonly"))
+			Expect(cmd.FirstKeyPos).To(Equal(int8(1)))
+			Expect(cmd.LastKeyPos).To(Equal(int8(-1)))
+			Expect(cmd.StepCount).To(Equal(int8(1)))
+
+			cmd = cmds["ping"]
+			Expect(cmd.Name).To(Equal("ping"))
+			Expect(cmd.Arity).To(Equal(int8(-1)))
+			Expect(cmd.Flags).To(ContainElement("stale"))
+			Expect(cmd.Flags).To(ContainElement("fast"))
+			Expect(cmd.FirstKeyPos).To(Equal(int8(0)))
+			Expect(cmd.LastKeyPos).To(Equal(int8(0)))
+			Expect(cmd.StepCount).To(Equal(int8(0)))
+		})
+
+	})
+
+	Describe("debugging", func() {
+
+		It("should DebugObject", func() {
+			err := client.DebugObject("foo").Err()
+			Expect(err).To(MatchError("ERR no such key"))
+
+			err = client.Set("foo", "bar", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			s, err := client.DebugObject("foo").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(s).To(ContainSubstring("serializedlength:4"))
+		})
+
+		It("should MemoryUsage", func() {
+			err := client.MemoryUsage("foo").Err()
+			Expect(err).To(Equal(redis.Nil))
+
+			err = client.Set("foo", "bar", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			n, err := client.MemoryUsage("foo").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(50)))
+
+			n, err = client.MemoryUsage("foo", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(50)))
+		})
+
+	})
+
+	Describe("keys", func() {
+
+		It("should Del", func() {
+			err := client.Set("key1", "Hello", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.Set("key2", "World", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			n, err := client.Del("key1", "key2", "key3").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+		})
+
+		It("should Unlink", func() {
+			err := client.Set("key1", "Hello", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.Set("key2", "World", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			n, err := client.Unlink("key1", "key2", "key3").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+		})
+
+		It("should Dump", func() {
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			dump := client.Dump("key")
+			Expect(dump.Err()).NotTo(HaveOccurred())
+			Expect(dump.Val()).NotTo(BeEmpty())
+		})
+
+		It("should Exists", func() {
+			set := client.Set("key1", "Hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			n, err := client.Exists("key1").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(1)))
+
+			n, err = client.Exists("key2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(0)))
+
+			n, err = client.Exists("key1", "key2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(1)))
+
+			n, err = client.Exists("key1", "key1").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+		})
+
+		It("should Expire", func() {
+			set := client.Set("key", "Hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			expire := client.Expire("key", 10*time.Second)
+			Expect(expire.Err()).NotTo(HaveOccurred())
+			Expect(expire.Val()).To(Equal(true))
+
+			ttl := client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val()).To(Equal(10 * time.Second))
+
+			set = client.Set("key", "Hello World", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			ttl = client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val()).To(Equal(time.Duration(-1)))
+
+			ttl = client.TTL("nonexistent_key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val()).To(Equal(time.Duration(-2)))
+		})
+
+		It("should ExpireAt", func() {
+			set := client.Set("key", "Hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			n, err := client.Exists("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(1)))
+
+			expireAt := client.ExpireAt("key", time.Now().Add(-time.Hour))
+			Expect(expireAt.Err()).NotTo(HaveOccurred())
+			Expect(expireAt.Val()).To(Equal(true))
+
+			n, err = client.Exists("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(0)))
+		})
+
+		It("should Keys", func() {
+			mset := client.MSet("one", "1", "two", "2", "three", "3", "four", "4")
+			Expect(mset.Err()).NotTo(HaveOccurred())
+			Expect(mset.Val()).To(Equal("OK"))
+
+			keys := client.Keys("*o*")
+			Expect(keys.Err()).NotTo(HaveOccurred())
+			Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "two"}))
+
+			keys = client.Keys("t??")
+			Expect(keys.Err()).NotTo(HaveOccurred())
+			Expect(keys.Val()).To(Equal([]string{"two"}))
+
+			keys = client.Keys("*")
+			Expect(keys.Err()).NotTo(HaveOccurred())
+			Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "three", "two"}))
+		})
+
+		It("should Migrate", func() {
+			migrate := client.Migrate("localhost", redisSecondaryPort, "key", 0, 0)
+			Expect(migrate.Err()).NotTo(HaveOccurred())
+			Expect(migrate.Val()).To(Equal("NOKEY"))
+
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			migrate = client.Migrate("localhost", redisSecondaryPort, "key", 0, 0)
+			Expect(migrate.Err()).To(MatchError("IOERR error or timeout writing to target instance"))
+			Expect(migrate.Val()).To(Equal(""))
+		})
+
+		It("should Move", func() {
+			move := client.Move("key", 2)
+			Expect(move.Err()).NotTo(HaveOccurred())
+			Expect(move.Val()).To(Equal(false))
+
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			move = client.Move("key", 2)
+			Expect(move.Err()).NotTo(HaveOccurred())
+			Expect(move.Val()).To(Equal(true))
+
+			get := client.Get("key")
+			Expect(get.Err()).To(Equal(redis.Nil))
+			Expect(get.Val()).To(Equal(""))
+
+			pipe := client.Pipeline()
+			pipe.Select(2)
+			get = pipe.Get("key")
+			pipe.FlushDB()
+
+			_, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("hello"))
+		})
+
+		It("should Object", func() {
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			refCount := client.ObjectRefCount("key")
+			Expect(refCount.Err()).NotTo(HaveOccurred())
+			Expect(refCount.Val()).To(Equal(int64(1)))
+
+			err := client.ObjectEncoding("key").Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			idleTime := client.ObjectIdleTime("key")
+			Expect(idleTime.Err()).NotTo(HaveOccurred())
+			Expect(idleTime.Val()).To(Equal(time.Duration(0)))
+		})
+
+		It("should Persist", func() {
+			set := client.Set("key", "Hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			expire := client.Expire("key", 10*time.Second)
+			Expect(expire.Err()).NotTo(HaveOccurred())
+			Expect(expire.Val()).To(Equal(true))
+
+			ttl := client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val()).To(Equal(10 * time.Second))
+
+			persist := client.Persist("key")
+			Expect(persist.Err()).NotTo(HaveOccurred())
+			Expect(persist.Val()).To(Equal(true))
+
+			ttl = client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val() < 0).To(Equal(true))
+		})
+
+		It("should PExpire", func() {
+			set := client.Set("key", "Hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			expiration := 900 * time.Millisecond
+			pexpire := client.PExpire("key", expiration)
+			Expect(pexpire.Err()).NotTo(HaveOccurred())
+			Expect(pexpire.Val()).To(Equal(true))
+
+			ttl := client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val()).To(Equal(time.Second))
+
+			pttl := client.PTTL("key")
+			Expect(pttl.Err()).NotTo(HaveOccurred())
+			Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
+		})
+
+		It("should PExpireAt", func() {
+			set := client.Set("key", "Hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			expiration := 900 * time.Millisecond
+			pexpireat := client.PExpireAt("key", time.Now().Add(expiration))
+			Expect(pexpireat.Err()).NotTo(HaveOccurred())
+			Expect(pexpireat.Val()).To(Equal(true))
+
+			ttl := client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val()).To(Equal(time.Second))
+
+			pttl := client.PTTL("key")
+			Expect(pttl.Err()).NotTo(HaveOccurred())
+			Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
+		})
+
+		It("should PTTL", func() {
+			set := client.Set("key", "Hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			expiration := time.Second
+			expire := client.Expire("key", expiration)
+			Expect(expire.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			pttl := client.PTTL("key")
+			Expect(pttl.Err()).NotTo(HaveOccurred())
+			Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
+		})
+
+		It("should RandomKey", func() {
+			randomKey := client.RandomKey()
+			Expect(randomKey.Err()).To(Equal(redis.Nil))
+			Expect(randomKey.Val()).To(Equal(""))
+
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			randomKey = client.RandomKey()
+			Expect(randomKey.Err()).NotTo(HaveOccurred())
+			Expect(randomKey.Val()).To(Equal("key"))
+		})
+
+		It("should Rename", func() {
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			status := client.Rename("key", "key1")
+			Expect(status.Err()).NotTo(HaveOccurred())
+			Expect(status.Val()).To(Equal("OK"))
+
+			get := client.Get("key1")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("hello"))
+		})
+
+		It("should RenameNX", func() {
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			renameNX := client.RenameNX("key", "key1")
+			Expect(renameNX.Err()).NotTo(HaveOccurred())
+			Expect(renameNX.Val()).To(Equal(true))
+
+			get := client.Get("key1")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("hello"))
+		})
+
+		It("should Restore", func() {
+			err := client.Set("key", "hello", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			dump := client.Dump("key")
+			Expect(dump.Err()).NotTo(HaveOccurred())
+
+			err = client.Del("key").Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			restore, err := client.Restore("key", 0, dump.Val()).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(restore).To(Equal("OK"))
+
+			type_, err := client.Type("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(type_).To(Equal("string"))
+
+			val, err := client.Get("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("hello"))
+		})
+
+		It("should RestoreReplace", func() {
+			err := client.Set("key", "hello", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			dump := client.Dump("key")
+			Expect(dump.Err()).NotTo(HaveOccurred())
+
+			restore, err := client.RestoreReplace("key", 0, dump.Val()).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(restore).To(Equal("OK"))
+
+			type_, err := client.Type("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(type_).To(Equal("string"))
+
+			val, err := client.Get("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("hello"))
+		})
+
+		It("should Sort", func() {
+			size, err := client.LPush("list", "1").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(1)))
+
+			size, err = client.LPush("list", "3").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(2)))
+
+			size, err = client.LPush("list", "2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(3)))
+
+			els, err := client.Sort("list", &redis.Sort{
+				Offset: 0,
+				Count:  2,
+				Order:  "ASC",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(els).To(Equal([]string{"1", "2"}))
+		})
+
+		It("should Sort and Get", func() {
+			size, err := client.LPush("list", "1").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(1)))
+
+			size, err = client.LPush("list", "3").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(2)))
+
+			size, err = client.LPush("list", "2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(3)))
+
+			err = client.Set("object_2", "value2", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			{
+				els, err := client.Sort("list", &redis.Sort{
+					Get: []string{"object_*"},
+				}).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(els).To(Equal([]string{"", "value2", ""}))
+			}
+
+			{
+				els, err := client.SortInterfaces("list", &redis.Sort{
+					Get: []string{"object_*"},
+				}).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(els).To(Equal([]interface{}{nil, "value2", nil}))
+			}
+		})
+
+		It("should Sort and Store", func() {
+			size, err := client.LPush("list", "1").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(1)))
+
+			size, err = client.LPush("list", "3").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(2)))
+
+			size, err = client.LPush("list", "2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(size).To(Equal(int64(3)))
+
+			n, err := client.SortStore("list", "list2", &redis.Sort{
+				Offset: 0,
+				Count:  2,
+				Order:  "ASC",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+
+			els, err := client.LRange("list2", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(els).To(Equal([]string{"1", "2"}))
+		})
+
+		It("should Touch", func() {
+			set1 := client.Set("touch1", "hello", 0)
+			Expect(set1.Err()).NotTo(HaveOccurred())
+			Expect(set1.Val()).To(Equal("OK"))
+
+			set2 := client.Set("touch2", "hello", 0)
+			Expect(set2.Err()).NotTo(HaveOccurred())
+			Expect(set2.Val()).To(Equal("OK"))
+
+			touch := client.Touch("touch1", "touch2", "touch3")
+			Expect(touch.Err()).NotTo(HaveOccurred())
+			Expect(touch.Val()).To(Equal(int64(2)))
+		})
+
+		It("should TTL", func() {
+			ttl := client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val() < 0).To(Equal(true))
+
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			expire := client.Expire("key", 60*time.Second)
+			Expect(expire.Err()).NotTo(HaveOccurred())
+			Expect(expire.Val()).To(Equal(true))
+
+			ttl = client.TTL("key")
+			Expect(ttl.Err()).NotTo(HaveOccurred())
+			Expect(ttl.Val()).To(Equal(60 * time.Second))
+		})
+
+		It("should Type", func() {
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			type_ := client.Type("key")
+			Expect(type_.Err()).NotTo(HaveOccurred())
+			Expect(type_.Val()).To(Equal("string"))
+		})
+
+	})
+
+	Describe("scanning", func() {
+
+		It("should Scan", func() {
+			for i := 0; i < 1000; i++ {
+				set := client.Set(fmt.Sprintf("key%d", i), "hello", 0)
+				Expect(set.Err()).NotTo(HaveOccurred())
+			}
+
+			keys, cursor, err := client.Scan(0, "", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(keys).NotTo(BeEmpty())
+			Expect(cursor).NotTo(BeZero())
+		})
+
+		It("should SScan", func() {
+			for i := 0; i < 1000; i++ {
+				sadd := client.SAdd("myset", fmt.Sprintf("member%d", i))
+				Expect(sadd.Err()).NotTo(HaveOccurred())
+			}
+
+			keys, cursor, err := client.SScan("myset", 0, "", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(keys).NotTo(BeEmpty())
+			Expect(cursor).NotTo(BeZero())
+		})
+
+		It("should HScan", func() {
+			for i := 0; i < 1000; i++ {
+				sadd := client.HSet("myhash", fmt.Sprintf("key%d", i), "hello")
+				Expect(sadd.Err()).NotTo(HaveOccurred())
+			}
+
+			keys, cursor, err := client.HScan("myhash", 0, "", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(keys).NotTo(BeEmpty())
+			Expect(cursor).NotTo(BeZero())
+		})
+
+		It("should ZScan", func() {
+			for i := 0; i < 1000; i++ {
+				err := client.ZAdd("myset", &redis.Z{
+					Score:  float64(i),
+					Member: fmt.Sprintf("member%d", i),
+				}).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+
+			keys, cursor, err := client.ZScan("myset", 0, "", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(keys).NotTo(BeEmpty())
+			Expect(cursor).NotTo(BeZero())
+		})
+
+	})
+
+	Describe("strings", func() {
+
+		It("should Append", func() {
+			n, err := client.Exists("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(0)))
+
+			append := client.Append("key", "Hello")
+			Expect(append.Err()).NotTo(HaveOccurred())
+			Expect(append.Val()).To(Equal(int64(5)))
+
+			append = client.Append("key", " World")
+			Expect(append.Err()).NotTo(HaveOccurred())
+			Expect(append.Val()).To(Equal(int64(11)))
+
+			get := client.Get("key")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("Hello World"))
+		})
+
+		It("should BitCount", func() {
+			set := client.Set("key", "foobar", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			bitCount := client.BitCount("key", nil)
+			Expect(bitCount.Err()).NotTo(HaveOccurred())
+			Expect(bitCount.Val()).To(Equal(int64(26)))
+
+			bitCount = client.BitCount("key", &redis.BitCount{
+				Start: 0,
+				End:   0,
+			})
+			Expect(bitCount.Err()).NotTo(HaveOccurred())
+			Expect(bitCount.Val()).To(Equal(int64(4)))
+
+			bitCount = client.BitCount("key", &redis.BitCount{
+				Start: 1,
+				End:   1,
+			})
+			Expect(bitCount.Err()).NotTo(HaveOccurred())
+			Expect(bitCount.Val()).To(Equal(int64(6)))
+		})
+
+		It("should BitOpAnd", func() {
+			set := client.Set("key1", "1", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			set = client.Set("key2", "0", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			bitOpAnd := client.BitOpAnd("dest", "key1", "key2")
+			Expect(bitOpAnd.Err()).NotTo(HaveOccurred())
+			Expect(bitOpAnd.Val()).To(Equal(int64(1)))
+
+			get := client.Get("dest")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("0"))
+		})
+
+		It("should BitOpOr", func() {
+			set := client.Set("key1", "1", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			set = client.Set("key2", "0", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			bitOpOr := client.BitOpOr("dest", "key1", "key2")
+			Expect(bitOpOr.Err()).NotTo(HaveOccurred())
+			Expect(bitOpOr.Val()).To(Equal(int64(1)))
+
+			get := client.Get("dest")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("1"))
+		})
+
+		It("should BitOpXor", func() {
+			set := client.Set("key1", "\xff", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			set = client.Set("key2", "\x0f", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			bitOpXor := client.BitOpXor("dest", "key1", "key2")
+			Expect(bitOpXor.Err()).NotTo(HaveOccurred())
+			Expect(bitOpXor.Val()).To(Equal(int64(1)))
+
+			get := client.Get("dest")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("\xf0"))
+		})
+
+		It("should BitOpNot", func() {
+			set := client.Set("key1", "\x00", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			bitOpNot := client.BitOpNot("dest", "key1")
+			Expect(bitOpNot.Err()).NotTo(HaveOccurred())
+			Expect(bitOpNot.Val()).To(Equal(int64(1)))
+
+			get := client.Get("dest")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("\xff"))
+		})
+
+		It("should BitPos", func() {
+			err := client.Set("mykey", "\xff\xf0\x00", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			pos, err := client.BitPos("mykey", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(12)))
+
+			pos, err = client.BitPos("mykey", 1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(0)))
+
+			pos, err = client.BitPos("mykey", 0, 2).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(16)))
+
+			pos, err = client.BitPos("mykey", 1, 2).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(-1)))
+
+			pos, err = client.BitPos("mykey", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(16)))
+
+			pos, err = client.BitPos("mykey", 1, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(-1)))
+
+			pos, err = client.BitPos("mykey", 0, 2, 1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(-1)))
+
+			pos, err = client.BitPos("mykey", 0, 0, -3).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(-1)))
+
+			pos, err = client.BitPos("mykey", 0, 0, 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(Equal(int64(-1)))
+		})
+
+		It("should BitField", func() {
+			nn, err := client.BitField("mykey", "INCRBY", "i5", 100, 1, "GET", "u4", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(nn).To(Equal([]int64{1, 0}))
+		})
+
+		It("should Decr", func() {
+			set := client.Set("key", "10", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			decr := client.Decr("key")
+			Expect(decr.Err()).NotTo(HaveOccurred())
+			Expect(decr.Val()).To(Equal(int64(9)))
+
+			set = client.Set("key", "234293482390480948029348230948", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			decr = client.Decr("key")
+			Expect(decr.Err()).To(MatchError("ERR value is not an integer or out of range"))
+			Expect(decr.Val()).To(Equal(int64(0)))
+		})
+
+		It("should DecrBy", func() {
+			set := client.Set("key", "10", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			decrBy := client.DecrBy("key", 5)
+			Expect(decrBy.Err()).NotTo(HaveOccurred())
+			Expect(decrBy.Val()).To(Equal(int64(5)))
+		})
+
+		It("should Get", func() {
+			get := client.Get("_")
+			Expect(get.Err()).To(Equal(redis.Nil))
+			Expect(get.Val()).To(Equal(""))
+
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			get = client.Get("key")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("hello"))
+		})
+
+		It("should GetBit", func() {
+			setBit := client.SetBit("key", 7, 1)
+			Expect(setBit.Err()).NotTo(HaveOccurred())
+			Expect(setBit.Val()).To(Equal(int64(0)))
+
+			getBit := client.GetBit("key", 0)
+			Expect(getBit.Err()).NotTo(HaveOccurred())
+			Expect(getBit.Val()).To(Equal(int64(0)))
+
+			getBit = client.GetBit("key", 7)
+			Expect(getBit.Err()).NotTo(HaveOccurred())
+			Expect(getBit.Val()).To(Equal(int64(1)))
+
+			getBit = client.GetBit("key", 100)
+			Expect(getBit.Err()).NotTo(HaveOccurred())
+			Expect(getBit.Val()).To(Equal(int64(0)))
+		})
+
+		It("should GetRange", func() {
+			set := client.Set("key", "This is a string", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			getRange := client.GetRange("key", 0, 3)
+			Expect(getRange.Err()).NotTo(HaveOccurred())
+			Expect(getRange.Val()).To(Equal("This"))
+
+			getRange = client.GetRange("key", -3, -1)
+			Expect(getRange.Err()).NotTo(HaveOccurred())
+			Expect(getRange.Val()).To(Equal("ing"))
+
+			getRange = client.GetRange("key", 0, -1)
+			Expect(getRange.Err()).NotTo(HaveOccurred())
+			Expect(getRange.Val()).To(Equal("This is a string"))
+
+			getRange = client.GetRange("key", 10, 100)
+			Expect(getRange.Err()).NotTo(HaveOccurred())
+			Expect(getRange.Val()).To(Equal("string"))
+		})
+
+		It("should GetSet", func() {
+			incr := client.Incr("key")
+			Expect(incr.Err()).NotTo(HaveOccurred())
+			Expect(incr.Val()).To(Equal(int64(1)))
+
+			getSet := client.GetSet("key", "0")
+			Expect(getSet.Err()).NotTo(HaveOccurred())
+			Expect(getSet.Val()).To(Equal("1"))
+
+			get := client.Get("key")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("0"))
+		})
+
+		It("should Incr", func() {
+			set := client.Set("key", "10", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			incr := client.Incr("key")
+			Expect(incr.Err()).NotTo(HaveOccurred())
+			Expect(incr.Val()).To(Equal(int64(11)))
+
+			get := client.Get("key")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("11"))
+		})
+
+		It("should IncrBy", func() {
+			set := client.Set("key", "10", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			incrBy := client.IncrBy("key", 5)
+			Expect(incrBy.Err()).NotTo(HaveOccurred())
+			Expect(incrBy.Val()).To(Equal(int64(15)))
+		})
+
+		It("should IncrByFloat", func() {
+			set := client.Set("key", "10.50", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			incrByFloat := client.IncrByFloat("key", 0.1)
+			Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+			Expect(incrByFloat.Val()).To(Equal(10.6))
+
+			set = client.Set("key", "5.0e3", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			incrByFloat = client.IncrByFloat("key", 2.0e2)
+			Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+			Expect(incrByFloat.Val()).To(Equal(float64(5200)))
+		})
+
+		It("should IncrByFloatOverflow", func() {
+			incrByFloat := client.IncrByFloat("key", 996945661)
+			Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+			Expect(incrByFloat.Val()).To(Equal(float64(996945661)))
+		})
+
+		It("should MSetMGet", func() {
+			mSet := client.MSet("key1", "hello1", "key2", "hello2")
+			Expect(mSet.Err()).NotTo(HaveOccurred())
+			Expect(mSet.Val()).To(Equal("OK"))
+
+			mGet := client.MGet("key1", "key2", "_")
+			Expect(mGet.Err()).NotTo(HaveOccurred())
+			Expect(mGet.Val()).To(Equal([]interface{}{"hello1", "hello2", nil}))
+		})
+
+		It("should MSetNX", func() {
+			mSetNX := client.MSetNX("key1", "hello1", "key2", "hello2")
+			Expect(mSetNX.Err()).NotTo(HaveOccurred())
+			Expect(mSetNX.Val()).To(Equal(true))
+
+			mSetNX = client.MSetNX("key2", "hello1", "key3", "hello2")
+			Expect(mSetNX.Err()).NotTo(HaveOccurred())
+			Expect(mSetNX.Val()).To(Equal(false))
+		})
+
+		It("should Set with expiration", func() {
+			err := client.Set("key", "hello", 100*time.Millisecond).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			val, err := client.Get("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("hello"))
+
+			Eventually(func() error {
+				return client.Get("key").Err()
+			}, "1s", "100ms").Should(Equal(redis.Nil))
+		})
+
+		It("should SetGet", func() {
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			get := client.Get("key")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("hello"))
+		})
+
+		It("should SetNX", func() {
+			setNX := client.SetNX("key", "hello", 0)
+			Expect(setNX.Err()).NotTo(HaveOccurred())
+			Expect(setNX.Val()).To(Equal(true))
+
+			setNX = client.SetNX("key", "hello2", 0)
+			Expect(setNX.Err()).NotTo(HaveOccurred())
+			Expect(setNX.Val()).To(Equal(false))
+
+			get := client.Get("key")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("hello"))
+		})
+
+		It("should SetNX with expiration", func() {
+			isSet, err := client.SetNX("key", "hello", time.Second).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isSet).To(Equal(true))
+
+			isSet, err = client.SetNX("key", "hello2", time.Second).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isSet).To(Equal(false))
+
+			val, err := client.Get("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("hello"))
+		})
+
+		It("should SetXX", func() {
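+			// SET with the XX flag only succeeds once the key already exists.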
+			isSet, err := client.SetXX("key", "hello2", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isSet).To(Equal(false))
+
+			err = client.Set("key", "hello", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			isSet, err = client.SetXX("key", "hello2", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isSet).To(Equal(true))
+
+			val, err := client.Get("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("hello2"))
+		})
+
+		It("should SetXX with expiration", func() {
+			isSet, err := client.SetXX("key", "hello2", time.Second).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isSet).To(Equal(false))
+
+			err = client.Set("key", "hello", time.Second).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			isSet, err = client.SetXX("key", "hello2", time.Second).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(isSet).To(Equal(true))
+
+			val, err := client.Get("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("hello2"))
+		})
+
+		It("should SetRange", func() {
+			set := client.Set("key", "Hello World", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
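+			// SETRANGE overwrites the value from offset 6 and returns the resulting length.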
+			range_ := client.SetRange("key", 6, "Redis")
+			Expect(range_.Err()).NotTo(HaveOccurred())
+			Expect(range_.Val()).To(Equal(int64(11)))
+
+			get := client.Get("key")
+			Expect(get.Err()).NotTo(HaveOccurred())
+			Expect(get.Val()).To(Equal("Hello Redis"))
+		})
+
+		It("should StrLen", func() {
+			set := client.Set("key", "hello", 0)
+			Expect(set.Err()).NotTo(HaveOccurred())
+			Expect(set.Val()).To(Equal("OK"))
+
+			strLen := client.StrLen("key")
+			Expect(strLen.Err()).NotTo(HaveOccurred())
+			Expect(strLen.Val()).To(Equal(int64(5)))
+
+			strLen = client.StrLen("_")
+			Expect(strLen.Err()).NotTo(HaveOccurred())
+			Expect(strLen.Val()).To(Equal(int64(0)))
+		})
+
+	})
+
+	Describe("hashes", func() {
+
+		It("should HDel", func() {
+			hSet := client.HSet("hash", "key", "hello")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+
+			hDel := client.HDel("hash", "key")
+			Expect(hDel.Err()).NotTo(HaveOccurred())
+			Expect(hDel.Val()).To(Equal(int64(1)))
+
+			hDel = client.HDel("hash", "key")
+			Expect(hDel.Err()).NotTo(HaveOccurred())
+			Expect(hDel.Val()).To(Equal(int64(0)))
+		})
+
+		It("should HExists", func() {
+			hSet := client.HSet("hash", "key", "hello")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+
+			hExists := client.HExists("hash", "key")
+			Expect(hExists.Err()).NotTo(HaveOccurred())
+			Expect(hExists.Val()).To(Equal(true))
+
+			hExists = client.HExists("hash", "key1")
+			Expect(hExists.Err()).NotTo(HaveOccurred())
+			Expect(hExists.Val()).To(Equal(false))
+		})
+
+		It("should HGet", func() {
+			hSet := client.HSet("hash", "key", "hello")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+
+			hGet := client.HGet("hash", "key")
+			Expect(hGet.Err()).NotTo(HaveOccurred())
+			Expect(hGet.Val()).To(Equal("hello"))
+
+			hGet = client.HGet("hash", "key1")
+			Expect(hGet.Err()).To(Equal(redis.Nil))
+			Expect(hGet.Val()).To(Equal(""))
+		})
+
+		It("should HGetAll", func() {
+			err := client.HSet("hash", "key1", "hello1").Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.HSet("hash", "key2", "hello2").Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			m, err := client.HGetAll("hash").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(m).To(Equal(map[string]string{"key1": "hello1", "key2": "hello2"}))
+		})
+
+		It("should HIncrBy", func() {
+			hSet := client.HSet("hash", "key", "5")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+
+			hIncrBy := client.HIncrBy("hash", "key", 1)
+			Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+			Expect(hIncrBy.Val()).To(Equal(int64(6)))
+
+			hIncrBy = client.HIncrBy("hash", "key", -1)
+			Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+			Expect(hIncrBy.Val()).To(Equal(int64(5)))
+
+			hIncrBy = client.HIncrBy("hash", "key", -10)
+			Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+			Expect(hIncrBy.Val()).To(Equal(int64(-5)))
+		})
+
+		It("should HIncrByFloat", func() {
+			hSet := client.HSet("hash", "field", "10.50")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+			Expect(hSet.Val()).To(Equal(true))
+
+			hIncrByFloat := client.HIncrByFloat("hash", "field", 0.1)
+			Expect(hIncrByFloat.Err()).NotTo(HaveOccurred())
+			Expect(hIncrByFloat.Val()).To(Equal(10.6))
+
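+			// HSET returns false here because the field already exists and is only updated.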
+			hSet = client.HSet("hash", "field", "5.0e3")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+			Expect(hSet.Val()).To(Equal(false))
+
+			hIncrByFloat = client.HIncrByFloat("hash", "field", 2.0e2)
+			Expect(hIncrByFloat.Err()).NotTo(HaveOccurred())
+			Expect(hIncrByFloat.Val()).To(Equal(float64(5200)))
+		})
+
+		It("should HKeys", func() {
+			hkeys := client.HKeys("hash")
+			Expect(hkeys.Err()).NotTo(HaveOccurred())
+			Expect(hkeys.Val()).To(Equal([]string{}))
+
+			hset := client.HSet("hash", "key1", "hello1")
+			Expect(hset.Err()).NotTo(HaveOccurred())
+			hset = client.HSet("hash", "key2", "hello2")
+			Expect(hset.Err()).NotTo(HaveOccurred())
+
+			hkeys = client.HKeys("hash")
+			Expect(hkeys.Err()).NotTo(HaveOccurred())
+			Expect(hkeys.Val()).To(Equal([]string{"key1", "key2"}))
+		})
+
+		It("should HLen", func() {
+			hSet := client.HSet("hash", "key1", "hello1")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+			hSet = client.HSet("hash", "key2", "hello2")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+
+			hLen := client.HLen("hash")
+			Expect(hLen.Err()).NotTo(HaveOccurred())
+			Expect(hLen.Val()).To(Equal(int64(2)))
+		})
+
+		It("should HMGet", func() {
+			err := client.HMSet("hash", "key1", "hello1", "key2", "hello2").Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.HMGet("hash", "key1", "key2", "_").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]interface{}{"hello1", "hello2", nil}))
+		})
+
+		It("should HMSet", func() {
+			ok, err := client.HMSet("hash", map[string]interface{}{
+				"key1": "hello1",
+				"key2": "hello2",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(ok).To(Equal(int64(2)))
+
+			v, err := client.HGet("hash", "key1").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(v).To(Equal("hello1"))
+
+			v, err = client.HGet("hash", "key2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(v).To(Equal("hello2"))
+
+			keys, err := client.HKeys("hash").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(keys).To(ConsistOf([]string{"key1", "key2"}))
+		})
+
+		It("should HSet", func() {
+			hSet := client.HSet("hash", "key", "hello")
+			Expect(hSet.Err()).NotTo(HaveOccurred())
+			Expect(hSet.Val()).To(Equal(true))
+
+			hGet := client.HGet("hash", "key")
+			Expect(hGet.Err()).NotTo(HaveOccurred())
+			Expect(hGet.Val()).To(Equal("hello"))
+		})
+
+		It("should HSetNX", func() {
+			hSetNX := client.HSetNX("hash", "key", "hello")
+			Expect(hSetNX.Err()).NotTo(HaveOccurred())
+			Expect(hSetNX.Val()).To(Equal(true))
+
+			hSetNX = client.HSetNX("hash", "key", "hello")
+			Expect(hSetNX.Err()).NotTo(HaveOccurred())
+			Expect(hSetNX.Val()).To(Equal(false))
+
+			hGet := client.HGet("hash", "key")
+			Expect(hGet.Err()).NotTo(HaveOccurred())
+			Expect(hGet.Val()).To(Equal("hello"))
+		})
+
+		It("should HVals", func() {
+			err := client.HSet("hash", "key1", "hello1").Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.HSet("hash", "key2", "hello2").Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			v, err := client.HVals("hash").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(v).To(Equal([]string{"hello1", "hello2"}))
+
+			var slice []string
+			err = client.HVals("hash").ScanSlice(&slice)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(slice).To(Equal([]string{"hello1", "hello2"}))
+		})
+
+	})
+
+	Describe("hyperloglog", func() {
+		It("should PFMerge", func() {
+			pfAdd := client.PFAdd("hll1", "1", "2", "3", "4", "5")
+			Expect(pfAdd.Err()).NotTo(HaveOccurred())
+
+			pfCount := client.PFCount("hll1")
+			Expect(pfCount.Err()).NotTo(HaveOccurred())
+			Expect(pfCount.Val()).To(Equal(int64(5)))
+
+			pfAdd = client.PFAdd("hll2", "a", "b", "c", "d", "e")
+			Expect(pfAdd.Err()).NotTo(HaveOccurred())
+
+			pfMerge := client.PFMerge("hllMerged", "hll1", "hll2")
+			Expect(pfMerge.Err()).NotTo(HaveOccurred())
+
+			pfCount = client.PFCount("hllMerged")
+			Expect(pfCount.Err()).NotTo(HaveOccurred())
+			Expect(pfCount.Val()).To(Equal(int64(10)))
+
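+			// PFCOUNT over several HyperLogLogs returns the cardinality of their union.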
+			pfCount = client.PFCount("hll1", "hll2")
+			Expect(pfCount.Err()).NotTo(HaveOccurred())
+			Expect(pfCount.Val()).To(Equal(int64(10)))
+		})
+	})
+
+	Describe("lists", func() {
+
+		It("should BLPop", func() {
+			rPush := client.RPush("list1", "a", "b", "c")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			bLPop := client.BLPop(0, "list1", "list2")
+			Expect(bLPop.Err()).NotTo(HaveOccurred())
+			Expect(bLPop.Val()).To(Equal([]string{"list1", "a"}))
+		})
+
+		It("should BLPop blocks", func() {
+			started := make(chan bool)
+			done := make(chan bool)
+			go func() {
+				defer GinkgoRecover()
+
+				started <- true
+				bLPop := client.BLPop(0, "list")
+				Expect(bLPop.Err()).NotTo(HaveOccurred())
+				Expect(bLPop.Val()).To(Equal([]string{"list", "a"}))
+				done <- true
+			}()
+			<-started
+
+			select {
+			case <-done:
+				Fail("BLPop is not blocked")
+			case <-time.After(time.Second):
+				// ok
+			}
+
+			rPush := client.RPush("list", "a")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			select {
+			case <-done:
+				// ok
+			case <-time.After(time.Second):
+				Fail("BLPop is still blocked")
+			}
+		})
+
+		It("should BLPop timeout", func() {
+			val, err := client.BLPop(time.Second, "list1").Result()
+			Expect(err).To(Equal(redis.Nil))
+			Expect(val).To(BeNil())
+
+			Expect(client.Ping().Err()).NotTo(HaveOccurred())
+
+			stats := client.PoolStats()
+			Expect(stats.Hits).To(Equal(uint32(2)))
+			Expect(stats.Misses).To(Equal(uint32(1)))
+			Expect(stats.Timeouts).To(Equal(uint32(0)))
+		})
+
+		It("should BRPop", func() {
+			rPush := client.RPush("list1", "a", "b", "c")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			bRPop := client.BRPop(0, "list1", "list2")
+			Expect(bRPop.Err()).NotTo(HaveOccurred())
+			Expect(bRPop.Val()).To(Equal([]string{"list1", "c"}))
+		})
+
+		It("should BRPop blocks", func() {
+			started := make(chan bool)
+			done := make(chan bool)
+			go func() {
+				defer GinkgoRecover()
+
+				started <- true
+				brpop := client.BRPop(0, "list")
+				Expect(brpop.Err()).NotTo(HaveOccurred())
+				Expect(brpop.Val()).To(Equal([]string{"list", "a"}))
+				done <- true
+			}()
+			<-started
+
+			select {
+			case <-done:
+				Fail("BRPop is not blocked")
+			case <-time.After(time.Second):
+				// ok
+			}
+
+			rPush := client.RPush("list", "a")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			select {
+			case <-done:
+				// ok
+			case <-time.After(time.Second):
+				Fail("BRPop is still blocked")
+			}
+		})
+
+		It("should BRPopLPush", func() {
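+			// list1 is still empty, so the first call times out with redis.Nil.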
+			_, err := client.BRPopLPush("list1", "list2", time.Second).Result()
+			Expect(err).To(Equal(redis.Nil))
+
+			err = client.RPush("list1", "a", "b", "c").Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			v, err := client.BRPopLPush("list1", "list2", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(v).To(Equal("c"))
+		})
+
+		It("should LIndex", func() {
+			lPush := client.LPush("list", "World")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+			lPush = client.LPush("list", "Hello")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+
+			lIndex := client.LIndex("list", 0)
+			Expect(lIndex.Err()).NotTo(HaveOccurred())
+			Expect(lIndex.Val()).To(Equal("Hello"))
+
+			lIndex = client.LIndex("list", -1)
+			Expect(lIndex.Err()).NotTo(HaveOccurred())
+			Expect(lIndex.Val()).To(Equal("World"))
+
+			lIndex = client.LIndex("list", 3)
+			Expect(lIndex.Err()).To(Equal(redis.Nil))
+			Expect(lIndex.Val()).To(Equal(""))
+		})
+
+		It("should LInsert", func() {
+			rPush := client.RPush("list", "Hello")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "World")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			lInsert := client.LInsert("list", "BEFORE", "World", "There")
+			Expect(lInsert.Err()).NotTo(HaveOccurred())
+			Expect(lInsert.Val()).To(Equal(int64(3)))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"Hello", "There", "World"}))
+		})
+
+		It("should LLen", func() {
+			lPush := client.LPush("list", "World")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+			lPush = client.LPush("list", "Hello")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+
+			lLen := client.LLen("list")
+			Expect(lLen.Err()).NotTo(HaveOccurred())
+			Expect(lLen.Val()).To(Equal(int64(2)))
+		})
+
+		It("should LPop", func() {
+			rPush := client.RPush("list", "one")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "two")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "three")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			lPop := client.LPop("list")
+			Expect(lPop.Err()).NotTo(HaveOccurred())
+			Expect(lPop.Val()).To(Equal("one"))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"two", "three"}))
+		})
+
+		It("should LPush", func() {
+			lPush := client.LPush("list", "World")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+			lPush = client.LPush("list", "Hello")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+		})
+
+		It("should LPushX", func() {
+			lPush := client.LPush("list", "World")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+
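+			// LPUSHX only pushes onto a list that already exists; it returns 0 for the missing list2 below.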
+			lPushX := client.LPushX("list", "Hello")
+			Expect(lPushX.Err()).NotTo(HaveOccurred())
+			Expect(lPushX.Val()).To(Equal(int64(2)))
+
+			lPush = client.LPush("list1", "three")
+			Expect(lPush.Err()).NotTo(HaveOccurred())
+			Expect(lPush.Val()).To(Equal(int64(1)))
+
+			lPushX = client.LPushX("list1", "two", "one")
+			Expect(lPushX.Err()).NotTo(HaveOccurred())
+			Expect(lPushX.Val()).To(Equal(int64(3)))
+
+			lPushX = client.LPushX("list2", "Hello")
+			Expect(lPushX.Err()).NotTo(HaveOccurred())
+			Expect(lPushX.Val()).To(Equal(int64(0)))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+
+			lRange = client.LRange("list1", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+			lRange = client.LRange("list2", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{}))
+		})
+
+		It("should LRange", func() {
+			rPush := client.RPush("list", "one")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "two")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "three")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			lRange := client.LRange("list", 0, 0)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"one"}))
+
+			lRange = client.LRange("list", -3, 2)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+			lRange = client.LRange("list", -100, 100)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+			lRange = client.LRange("list", 5, 10)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{}))
+		})
+
+		It("should LRem", func() {
+			rPush := client.RPush("list", "hello")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "hello")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "key")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "hello")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
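+			// A negative count removes matching elements starting from the tail of the list.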
+			lRem := client.LRem("list", -2, "hello")
+			Expect(lRem.Err()).NotTo(HaveOccurred())
+			Expect(lRem.Val()).To(Equal(int64(2)))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"hello", "key"}))
+		})
+
+		It("should LSet", func() {
+			rPush := client.RPush("list", "one")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "two")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "three")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			lSet := client.LSet("list", 0, "four")
+			Expect(lSet.Err()).NotTo(HaveOccurred())
+			Expect(lSet.Val()).To(Equal("OK"))
+
+			lSet = client.LSet("list", -2, "five")
+			Expect(lSet.Err()).NotTo(HaveOccurred())
+			Expect(lSet.Val()).To(Equal("OK"))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"four", "five", "three"}))
+		})
+
+		It("should LTrim", func() {
+			rPush := client.RPush("list", "one")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "two")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "three")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			lTrim := client.LTrim("list", 1, -1)
+			Expect(lTrim.Err()).NotTo(HaveOccurred())
+			Expect(lTrim.Val()).To(Equal("OK"))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"two", "three"}))
+		})
+
+		It("should RPop", func() {
+			rPush := client.RPush("list", "one")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "two")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "three")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			rPop := client.RPop("list")
+			Expect(rPop.Err()).NotTo(HaveOccurred())
+			Expect(rPop.Val()).To(Equal("three"))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
+		})
+
+		It("should RPopLPush", func() {
+			rPush := client.RPush("list", "one")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "two")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			rPush = client.RPush("list", "three")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+
+			rPopLPush := client.RPopLPush("list", "list2")
+			Expect(rPopLPush.Err()).NotTo(HaveOccurred())
+			Expect(rPopLPush.Val()).To(Equal("three"))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
+
+			lRange = client.LRange("list2", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"three"}))
+		})
+
+		It("should RPush", func() {
+			rPush := client.RPush("list", "Hello")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			Expect(rPush.Val()).To(Equal(int64(1)))
+
+			rPush = client.RPush("list", "World")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			Expect(rPush.Val()).To(Equal(int64(2)))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+		})
+
+		It("should RPushX", func() {
+			rPush := client.RPush("list", "Hello")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			Expect(rPush.Val()).To(Equal(int64(1)))
+
+			rPushX := client.RPushX("list", "World")
+			Expect(rPushX.Err()).NotTo(HaveOccurred())
+			Expect(rPushX.Val()).To(Equal(int64(2)))
+
+			rPush = client.RPush("list1", "one")
+			Expect(rPush.Err()).NotTo(HaveOccurred())
+			Expect(rPush.Val()).To(Equal(int64(1)))
+
+			rPushX = client.RPushX("list1", "two", "three")
+			Expect(rPushX.Err()).NotTo(HaveOccurred())
+			Expect(rPushX.Val()).To(Equal(int64(3)))
+
+			rPushX = client.RPushX("list2", "World")
+			Expect(rPushX.Err()).NotTo(HaveOccurred())
+			Expect(rPushX.Val()).To(Equal(int64(0)))
+
+			lRange := client.LRange("list", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+
+			lRange = client.LRange("list1", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+			lRange = client.LRange("list2", 0, -1)
+			Expect(lRange.Err()).NotTo(HaveOccurred())
+			Expect(lRange.Val()).To(Equal([]string{}))
+		})
+
+	})
+
+	Describe("sets", func() {
+
+		It("should SAdd", func() {
+			sAdd := client.SAdd("set", "Hello")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			Expect(sAdd.Val()).To(Equal(int64(1)))
+
+			sAdd = client.SAdd("set", "World")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			Expect(sAdd.Val()).To(Equal(int64(1)))
+
+			sAdd = client.SAdd("set", "World")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			Expect(sAdd.Val()).To(Equal(int64(0)))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+		})
+
+		It("should SAdd strings", func() {
+			set := []string{"Hello", "World", "World"}
+			sAdd := client.SAdd("set", set)
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			Expect(sAdd.Val()).To(Equal(int64(2)))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+		})
+
+		It("should SCard", func() {
+			sAdd := client.SAdd("set", "Hello")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			Expect(sAdd.Val()).To(Equal(int64(1)))
+
+			sAdd = client.SAdd("set", "World")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			Expect(sAdd.Val()).To(Equal(int64(1)))
+
+			sCard := client.SCard("set")
+			Expect(sCard.Err()).NotTo(HaveOccurred())
+			Expect(sCard.Val()).To(Equal(int64(2)))
+		})
+
+		It("should SDiff", func() {
+			sAdd := client.SAdd("set1", "a")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "b")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sAdd = client.SAdd("set2", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "d")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "e")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sDiff := client.SDiff("set1", "set2")
+			Expect(sDiff.Err()).NotTo(HaveOccurred())
+			Expect(sDiff.Val()).To(ConsistOf([]string{"a", "b"}))
+		})
+
+		It("should SDiffStore", func() {
+			sAdd := client.SAdd("set1", "a")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "b")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sAdd = client.SAdd("set2", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "d")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "e")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sDiffStore := client.SDiffStore("set", "set1", "set2")
+			Expect(sDiffStore.Err()).NotTo(HaveOccurred())
+			Expect(sDiffStore.Val()).To(Equal(int64(2)))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(ConsistOf([]string{"a", "b"}))
+		})
+
+		It("should SInter", func() {
+			sAdd := client.SAdd("set1", "a")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "b")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sAdd = client.SAdd("set2", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "d")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "e")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sInter := client.SInter("set1", "set2")
+			Expect(sInter.Err()).NotTo(HaveOccurred())
+			Expect(sInter.Val()).To(Equal([]string{"c"}))
+		})
+
+		It("should SInterStore", func() {
+			sAdd := client.SAdd("set1", "a")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "b")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sAdd = client.SAdd("set2", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "d")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "e")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sInterStore := client.SInterStore("set", "set1", "set2")
+			Expect(sInterStore.Err()).NotTo(HaveOccurred())
+			Expect(sInterStore.Val()).To(Equal(int64(1)))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(Equal([]string{"c"}))
+		})
+
+		It("should IsMember", func() {
+			sAdd := client.SAdd("set", "one")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sIsMember := client.SIsMember("set", "one")
+			Expect(sIsMember.Err()).NotTo(HaveOccurred())
+			Expect(sIsMember.Val()).To(Equal(true))
+
+			sIsMember = client.SIsMember("set", "two")
+			Expect(sIsMember.Err()).NotTo(HaveOccurred())
+			Expect(sIsMember.Val()).To(Equal(false))
+		})
+
+		It("should SMembers", func() {
+			sAdd := client.SAdd("set", "Hello")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "World")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+		})
+
+		It("should SMembersMap", func() {
+			sAdd := client.SAdd("set", "Hello")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "World")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sMembersMap := client.SMembersMap("set")
+			Expect(sMembersMap.Err()).NotTo(HaveOccurred())
+			Expect(sMembersMap.Val()).To(Equal(map[string]struct{}{"Hello": {}, "World": {}}))
+		})
+
+		It("should SMove", func() {
+			sAdd := client.SAdd("set1", "one")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "two")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sAdd = client.SAdd("set2", "three")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sMove := client.SMove("set1", "set2", "two")
+			Expect(sMove.Err()).NotTo(HaveOccurred())
+			Expect(sMove.Val()).To(Equal(true))
+
+			sMembers := client.SMembers("set1")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(Equal([]string{"one"}))
+
+			sMembers = client.SMembers("set2")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
+		})
+
+		It("should SPop", func() {
+			sAdd := client.SAdd("set", "one")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "two")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "three")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sPop := client.SPop("set")
+			Expect(sPop.Err()).NotTo(HaveOccurred())
+			Expect(sPop.Val()).NotTo(Equal(""))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(HaveLen(2))
+		})
+
+		It("should SPopN", func() {
+			sAdd := client.SAdd("set", "one")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "two")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "three")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "four")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sPopN := client.SPopN("set", 1)
+			Expect(sPopN.Err()).NotTo(HaveOccurred())
+			Expect(sPopN.Val()).NotTo(Equal([]string{""}))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(HaveLen(3))
+
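+			// Popping more members than remain returns only the three that are left.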
+			sPopN = client.SPopN("set", 4)
+			Expect(sPopN.Err()).NotTo(HaveOccurred())
+			Expect(sPopN.Val()).To(HaveLen(3))
+
+			sMembers = client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(HaveLen(0))
+		})
+
+		It("should SRandMember and SRandMemberN", func() {
+			err := client.SAdd("set", "one").Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.SAdd("set", "two").Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.SAdd("set", "three").Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			members, err := client.SMembers("set").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(HaveLen(3))
+
+			member, err := client.SRandMember("set").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(member).NotTo(Equal(""))
+
+			members, err = client.SRandMemberN("set", 2).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(HaveLen(2))
+		})
+
+		It("should SRem", func() {
+			sAdd := client.SAdd("set", "one")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "two")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set", "three")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sRem := client.SRem("set", "one")
+			Expect(sRem.Err()).NotTo(HaveOccurred())
+			Expect(sRem.Val()).To(Equal(int64(1)))
+
+			sRem = client.SRem("set", "four")
+			Expect(sRem.Err()).NotTo(HaveOccurred())
+			Expect(sRem.Val()).To(Equal(int64(0)))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
+		})
+
+		It("should SUnion", func() {
+			sAdd := client.SAdd("set1", "a")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "b")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sAdd = client.SAdd("set2", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "d")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "e")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sUnion := client.SUnion("set1", "set2")
+			Expect(sUnion.Err()).NotTo(HaveOccurred())
+			Expect(sUnion.Val()).To(HaveLen(5))
+		})
+
+		It("should SUnionStore", func() {
+			sAdd := client.SAdd("set1", "a")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "b")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set1", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sAdd = client.SAdd("set2", "c")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "d")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+			sAdd = client.SAdd("set2", "e")
+			Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+			sUnionStore := client.SUnionStore("set", "set1", "set2")
+			Expect(sUnionStore.Err()).NotTo(HaveOccurred())
+			Expect(sUnionStore.Val()).To(Equal(int64(5)))
+
+			sMembers := client.SMembers("set")
+			Expect(sMembers.Err()).NotTo(HaveOccurred())
+			Expect(sMembers.Val()).To(HaveLen(5))
+		})
+
+	})
+
+	Describe("sorted sets", func() {
+
+		It("should BZPopMax", func() {
+			err := client.ZAdd("zset1", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset1", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset1", &redis.Z{
+				Score:  3,
+				Member: "three",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
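+			// BZPOPMAX pops the highest-scored member across the keys and reports its source key.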
+			member, err := client.BZPopMax(0, "zset1", "zset2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(member).To(Equal(&redis.ZWithKey{
+				Z: redis.Z{
+					Score:  3,
+					Member: "three",
+				},
+				Key: "zset1",
+			}))
+		})
+
+		It("should BZPopMax blocks", func() {
+			started := make(chan bool)
+			done := make(chan bool)
+			go func() {
+				defer GinkgoRecover()
+
+				started <- true
+				bZPopMax := client.BZPopMax(0, "zset")
+				Expect(bZPopMax.Err()).NotTo(HaveOccurred())
+				Expect(bZPopMax.Val()).To(Equal(&redis.ZWithKey{
+					Z: redis.Z{
+						Member: "a",
+						Score:  1,
+					},
+					Key: "zset",
+				}))
+				done <- true
+			}()
+			<-started
+
+			select {
+			case <-done:
+				Fail("BZPopMax is not blocked")
+			case <-time.After(time.Second):
+				// ok
+			}
+
+			zAdd := client.ZAdd("zset", &redis.Z{
+				Member: "a",
+				Score:  1,
+			})
+			Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+			select {
+			case <-done:
+				// ok
+			case <-time.After(time.Second):
+				Fail("BZPopMax is still blocked")
+			}
+		})
+
+		It("should BZPopMax timeout", func() {
+			val, err := client.BZPopMax(time.Second, "zset1").Result()
+			Expect(err).To(Equal(redis.Nil))
+			Expect(val).To(BeNil())
+
+			Expect(client.Ping().Err()).NotTo(HaveOccurred())
+
+			stats := client.PoolStats()
+			Expect(stats.Hits).To(Equal(uint32(2)))
+			Expect(stats.Misses).To(Equal(uint32(1)))
+			Expect(stats.Timeouts).To(Equal(uint32(0)))
+		})
+
+		It("should BZPopMin", func() {
+			err := client.ZAdd("zset1", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset1", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset1", &redis.Z{
+				Score:  3,
+				Member: "three",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			member, err := client.BZPopMin(0, "zset1", "zset2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(member).To(Equal(&redis.ZWithKey{
+				Z: redis.Z{
+					Score:  1,
+					Member: "one",
+				},
+				Key: "zset1",
+			}))
+		})
+
+		It("should BZPopMin blocks", func() {
+			started := make(chan bool)
+			done := make(chan bool)
+			go func() {
+				defer GinkgoRecover()
+
+				started <- true
+				bZPopMin := client.BZPopMin(0, "zset")
+				Expect(bZPopMin.Err()).NotTo(HaveOccurred())
+				Expect(bZPopMin.Val()).To(Equal(&redis.ZWithKey{
+					Z: redis.Z{
+						Member: "a",
+						Score:  1,
+					},
+					Key: "zset",
+				}))
+				done <- true
+			}()
+			<-started
+
+			select {
+			case <-done:
+				Fail("BZPopMin is not blocked")
+			case <-time.After(time.Second):
+				// ok
+			}
+
+			zAdd := client.ZAdd("zset", &redis.Z{
+				Member: "a",
+				Score:  1,
+			})
+			Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+			select {
+			case <-done:
+				// ok
+			case <-time.After(time.Second):
+				Fail("BZPopMin is still blocked")
+			}
+		})
+
+		It("should BZPopMin timeout", func() {
+			val, err := client.BZPopMin(time.Second, "zset1").Result()
+			Expect(err).To(Equal(redis.Nil))
+			Expect(val).To(BeNil())
+
+			Expect(client.Ping().Err()).NotTo(HaveOccurred())
+
+			stats := client.PoolStats()
+			Expect(stats.Hits).To(Equal(uint32(2)))
+			Expect(stats.Misses).To(Equal(uint32(1)))
+			Expect(stats.Timeouts).To(Equal(uint32(0)))
+		})
+
+		It("should ZAdd", func() {
+			added, err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			added, err = client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "uno",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			added, err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			added, err = client.ZAdd("zset", &redis.Z{
+				Score:  3,
+				Member: "two",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(0)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  1,
+				Member: "uno",
+			}, {
+				Score:  3,
+				Member: "two",
+			}}))
+		})
+
+		It("should ZAdd bytes", func() {
+			added, err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: []byte("one"),
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			added, err = client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: []byte("uno"),
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			added, err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: []byte("two"),
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			added, err = client.ZAdd("zset", &redis.Z{
+				Score:  3,
+				Member: []byte("two"),
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(0)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  1,
+				Member: "uno",
+			}, {
+				Score:  3,
+				Member: "two",
+			}}))
+		})
+
+		It("should ZAddNX", func() {
+			added, err := client.ZAddNX("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
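+			// With NX the score of the existing member must stay unchanged.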
+			added, err = client.ZAddNX("zset", &redis.Z{
+				Score:  2,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(0)))
+
+			vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+		})
+
+		It("should ZAddXX", func() {
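+			// With XX nothing is added while the member does not exist yet.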
+			added, err := client.ZAddXX("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(0)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(BeEmpty())
+
+			added, err = client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			added, err = client.ZAddXX("zset", &redis.Z{
+				Score:  2,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(0)))
+
+			vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+		})
+
+		It("should ZAddCh", func() {
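+			// CH makes the reply count changed elements (added or updated) instead of only added ones.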
+			changed, err := client.ZAddCh("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(changed).To(Equal(int64(1)))
+
+			changed, err = client.ZAddCh("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(changed).To(Equal(int64(0)))
+		})
+
+		It("should ZAddNXCh", func() {
+			changed, err := client.ZAddNXCh("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(changed).To(Equal(int64(1)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+			changed, err = client.ZAddNXCh("zset", &redis.Z{
+				Score:  2,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(changed).To(Equal(int64(0)))
+
+			vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}}))
+		})
+
+		It("should ZAddXXCh", func() {
+			changed, err := client.ZAddXXCh("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(changed).To(Equal(int64(0)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(BeEmpty())
+
+			added, err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			changed, err = client.ZAddXXCh("zset", &redis.Z{
+				Score:  2,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(changed).To(Equal(int64(1)))
+
+			vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+		})
+
+		It("should ZIncr", func() {
+			score, err := client.ZIncr("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(score).To(Equal(float64(1)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+			score, err = client.ZIncr("zset", &redis.Z{Score: 1, Member: "one"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(score).To(Equal(float64(2)))
+
+			vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+		})
+
+		It("should ZIncrNX", func() {
+			score, err := client.ZIncrNX("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(score).To(Equal(float64(1)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
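+			// NX refuses to touch an existing member; go-redis reports this as redis.Nil.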
+			score, err = client.ZIncrNX("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).To(Equal(redis.Nil))
+			Expect(score).To(Equal(float64(0)))
+
+			vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+		})
+
+		It("should ZIncrXX", func() {
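+			// XX only increments existing members; a missing member yields redis.Nil.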
+			score, err := client.ZIncrXX("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).To(Equal(redis.Nil))
+			Expect(score).To(Equal(float64(0)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(BeEmpty())
+
+			added, err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(added).To(Equal(int64(1)))
+
+			score, err = client.ZIncrXX("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(score).To(Equal(float64(2)))
+
+			vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+		})
+
+		It("should ZCard", func() {
+			err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			card, err := client.ZCard("zset").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(card).To(Equal(int64(2)))
+		})
+
+		It("should ZCount", func() {
+			err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  3,
+				Member: "three",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			count, err := client.ZCount("zset", "-inf", "+inf").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(count).To(Equal(int64(3)))
+
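+			// "(1" is an exclusive lower bound, so only the members with scores 2 and 3 are counted.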
+			count, err = client.ZCount("zset", "(1", "3").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(count).To(Equal(int64(2)))
+
+			count, err = client.ZLexCount("zset", "-", "+").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(count).To(Equal(int64(3)))
+		})
+
+		It("should ZIncrBy", func() {
+			err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			n, err := client.ZIncrBy("zset", 2, "one").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(float64(3)))
+
+			val, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal([]redis.Z{{
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  3,
+				Member: "one",
+			}}))
+		})
+
+		It("should ZInterStore", func() {
+			err := client.ZAdd("zset1", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset1", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ZAdd("zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset3", &redis.Z{Score: 3, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
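+			// Intersection scores are weighted sums: one = 1*2+1*3 = 5, two = 2*2+2*3 = 10.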
+			n, err := client.ZInterStore("out", &redis.ZStore{
+				Keys:    []string{"zset1", "zset2"},
+				Weights: []float64{2, 3},
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+
+			vals, err := client.ZRangeWithScores("out", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  5,
+				Member: "one",
+			}, {
+				Score:  10,
+				Member: "two",
+			}}))
+		})
+
+		It("should ZPopMax", func() {
+			err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  3,
+				Member: "three",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			members, err := client.ZPopMax("zset").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(Equal([]redis.Z{{
+				Score:  3,
+				Member: "three",
+			}}))
+
+			// adding back 3
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  3,
+				Member: "three",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			members, err = client.ZPopMax("zset", 2).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(Equal([]redis.Z{{
+				Score:  3,
+				Member: "three",
+			}, {
+				Score:  2,
+				Member: "two",
+			}}))
+
+			// adding back 2 & 3
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  3,
+				Member: "three",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			members, err = client.ZPopMax("zset", 10).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(Equal([]redis.Z{{
+				Score:  3,
+				Member: "three",
+			}, {
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  1,
+				Member: "one",
+			}}))
+		})
+
+		It("should ZPopMin", func() {
+			err := client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  3,
+				Member: "three",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			members, err := client.ZPopMin("zset").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}}))
+
+			// adding back 1
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			members, err = client.ZPopMin("zset", 2).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  2,
+				Member: "two",
+			}}))
+
+			// adding back 1 & 2
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  1,
+				Member: "one",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  2,
+				Member: "two",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			members, err = client.ZPopMin("zset", 10).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(members).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  3,
+				Member: "three",
+			}}))
+		})
+
+		It("should ZRange", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRange := client.ZRange("zset", 0, -1)
+			Expect(zRange.Err()).NotTo(HaveOccurred())
+			Expect(zRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+			zRange = client.ZRange("zset", 2, 3)
+			Expect(zRange.Err()).NotTo(HaveOccurred())
+			Expect(zRange.Val()).To(Equal([]string{"three"}))
+
+			zRange = client.ZRange("zset", -2, -1)
+			Expect(zRange.Err()).NotTo(HaveOccurred())
+			Expect(zRange.Val()).To(Equal([]string{"two", "three"}))
+		})
+
+		It("should ZRangeWithScores", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  3,
+				Member: "three",
+			}}))
+
+			vals, err = client.ZRangeWithScores("zset", 2, 3).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 3, Member: "three"}}))
+
+			vals, err = client.ZRangeWithScores("zset", -2, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  3,
+				Member: "three",
+			}}))
+		})
+
+		It("should ZRangeByScore", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRangeByScore := client.ZRangeByScore("zset", &redis.ZRangeBy{
+				Min: "-inf",
+				Max: "+inf",
+			})
+			Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two", "three"}))
+
+			zRangeByScore = client.ZRangeByScore("zset", &redis.ZRangeBy{
+				Min: "1",
+				Max: "2",
+			})
+			Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two"}))
+
+			zRangeByScore = client.ZRangeByScore("zset", &redis.ZRangeBy{
+				Min: "(1",
+				Max: "2",
+			})
+			Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByScore.Val()).To(Equal([]string{"two"}))
+
+			zRangeByScore = client.ZRangeByScore("zset", &redis.ZRangeBy{
+				Min: "(1",
+				Max: "(2",
+			})
+			Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByScore.Val()).To(Equal([]string{}))
+		})
+
+		It("should ZRangeByLex", func() {
+			err := client.ZAdd("zset", &redis.Z{
+				Score:  0,
+				Member: "a",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  0,
+				Member: "b",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{
+				Score:  0,
+				Member: "c",
+			}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
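+			// "[" bounds are inclusive and "(" bounds are exclusive in lexicographic ranges.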
+			zRangeByLex := client.ZRangeByLex("zset", &redis.ZRangeBy{
+				Min: "-",
+				Max: "+",
+			})
+			Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b", "c"}))
+
+			zRangeByLex = client.ZRangeByLex("zset", &redis.ZRangeBy{
+				Min: "[a",
+				Max: "[b",
+			})
+			Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b"}))
+
+			zRangeByLex = client.ZRangeByLex("zset", &redis.ZRangeBy{
+				Min: "(a",
+				Max: "[b",
+			})
+			Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByLex.Val()).To(Equal([]string{"b"}))
+
+			zRangeByLex = client.ZRangeByLex("zset", &redis.ZRangeBy{
+				Min: "(a",
+				Max: "(b",
+			})
+			Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+			Expect(zRangeByLex.Val()).To(Equal([]string{}))
+		})
+
+		It("should ZRangeByScoreWithScoresMap", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.ZRangeByScoreWithScores("zset", &redis.ZRangeBy{
+				Min: "-inf",
+				Max: "+inf",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  3,
+				Member: "three",
+			}}))
+
+			vals, err = client.ZRangeByScoreWithScores("zset", &redis.ZRangeBy{
+				Min: "1",
+				Max: "2",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  2,
+				Member: "two",
+			}}))
+
+			vals, err = client.ZRangeByScoreWithScores("zset", &redis.ZRangeBy{
+				Min: "(1",
+				Max: "2",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "two"}}))
+
+			vals, err = client.ZRangeByScoreWithScores("zset", &redis.ZRangeBy{
+				Min: "(1",
+				Max: "(2",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{}))
+		})
+
+		It("should ZRank", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRank := client.ZRank("zset", "three")
+			Expect(zRank.Err()).NotTo(HaveOccurred())
+			Expect(zRank.Val()).To(Equal(int64(2)))
+
+			zRank = client.ZRank("zset", "four")
+			Expect(zRank.Err()).To(Equal(redis.Nil))
+			Expect(zRank.Val()).To(Equal(int64(0)))
+		})
+
+		It("should ZRem", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRem := client.ZRem("zset", "two")
+			Expect(zRem.Err()).NotTo(HaveOccurred())
+			Expect(zRem.Val()).To(Equal(int64(1)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  1,
+				Member: "one",
+			}, {
+				Score:  3,
+				Member: "three",
+			}}))
+		})
+
+		It("should ZRemRangeByRank", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRemRangeByRank := client.ZRemRangeByRank("zset", 0, 1)
+			Expect(zRemRangeByRank.Err()).NotTo(HaveOccurred())
+			Expect(zRemRangeByRank.Val()).To(Equal(int64(2)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  3,
+				Member: "three",
+			}}))
+		})
+
+		It("should ZRemRangeByScore", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRemRangeByScore := client.ZRemRangeByScore("zset", "-inf", "(2")
+			Expect(zRemRangeByScore.Err()).NotTo(HaveOccurred())
+			Expect(zRemRangeByScore.Val()).To(Equal(int64(1)))
+
+			vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  3,
+				Member: "three",
+			}}))
+		})
+
+		It("should ZRemRangeByLex", func() {
+			zz := []*redis.Z{
+				{Score: 0, Member: "aaaa"},
+				{Score: 0, Member: "b"},
+				{Score: 0, Member: "c"},
+				{Score: 0, Member: "d"},
+				{Score: 0, Member: "e"},
+				{Score: 0, Member: "foo"},
+				{Score: 0, Member: "zap"},
+				{Score: 0, Member: "zip"},
+				{Score: 0, Member: "ALPHA"},
+				{Score: 0, Member: "alpha"},
+			}
+			for _, z := range zz {
+				err := client.ZAdd("zset", z).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+
+			n, err := client.ZRemRangeByLex("zset", "[alpha", "[omega").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(6)))
+
+			vals, err := client.ZRange("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]string{"ALPHA", "aaaa", "zap", "zip"}))
+		})
+
+		It("should ZRevRange", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRevRange := client.ZRevRange("zset", 0, -1)
+			Expect(zRevRange.Err()).NotTo(HaveOccurred())
+			Expect(zRevRange.Val()).To(Equal([]string{"three", "two", "one"}))
+
+			zRevRange = client.ZRevRange("zset", 2, 3)
+			Expect(zRevRange.Err()).NotTo(HaveOccurred())
+			Expect(zRevRange.Val()).To(Equal([]string{"one"}))
+
+			zRevRange = client.ZRevRange("zset", -2, -1)
+			Expect(zRevRange.Err()).NotTo(HaveOccurred())
+			Expect(zRevRange.Val()).To(Equal([]string{"two", "one"}))
+		})
+
+		It("should ZRevRangeWithScoresMap", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			val, err := client.ZRevRangeWithScores("zset", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal([]redis.Z{{
+				Score:  3,
+				Member: "three",
+			}, {
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  1,
+				Member: "one",
+			}}))
+
+			val, err = client.ZRevRangeWithScores("zset", 2, 3).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+			val, err = client.ZRevRangeWithScores("zset", -2, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal([]redis.Z{{
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  1,
+				Member: "one",
+			}}))
+		})
+
+		It("should ZRevRangeByScore", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.ZRevRangeByScore(
+				"zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]string{"three", "two", "one"}))
+
+			vals, err = client.ZRevRangeByScore(
+				"zset", &redis.ZRangeBy{Max: "2", Min: "(1"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]string{"two"}))
+
+			vals, err = client.ZRevRangeByScore(
+				"zset", &redis.ZRangeBy{Max: "(2", Min: "(1"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]string{}))
+		})
+
+		It("should ZRevRangeByLex", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 0, Member: "a"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 0, Member: "b"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 0, Member: "c"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.ZRevRangeByLex(
+				"zset", &redis.ZRangeBy{Max: "+", Min: "-"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]string{"c", "b", "a"}))
+
+			vals, err = client.ZRevRangeByLex(
+				"zset", &redis.ZRangeBy{Max: "[b", Min: "(a"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]string{"b"}))
+
+			vals, err = client.ZRevRangeByLex(
+				"zset", &redis.ZRangeBy{Max: "(b", Min: "(a"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]string{}))
+		})
+
+		It("should ZRevRangeByScoreWithScores", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.ZRevRangeByScoreWithScores(
+				"zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  3,
+				Member: "three",
+			}, {
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  1,
+				Member: "one",
+			}}))
+		})
+
+		It("should ZRevRangeByScoreWithScoresMap", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.ZRevRangeByScoreWithScores(
+				"zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{
+				Score:  3,
+				Member: "three",
+			}, {
+				Score:  2,
+				Member: "two",
+			}, {
+				Score:  1,
+				Member: "one",
+			}}))
+
+			vals, err = client.ZRevRangeByScoreWithScores(
+				"zset", &redis.ZRangeBy{Max: "2", Min: "(1"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "two"}}))
+
+			vals, err = client.ZRevRangeByScoreWithScores(
+				"zset", &redis.ZRangeBy{Max: "(2", Min: "(1"}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.Z{}))
+		})
+
+		It("should ZRevRank", func() {
+			err := client.ZAdd("zset", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			zRevRank := client.ZRevRank("zset", "one")
+			Expect(zRevRank.Err()).NotTo(HaveOccurred())
+			Expect(zRevRank.Val()).To(Equal(int64(2)))
+
+			zRevRank = client.ZRevRank("zset", "four")
+			Expect(zRevRank.Err()).To(Equal(redis.Nil))
+			Expect(zRevRank.Val()).To(Equal(int64(0)))
+		})
+
+		It("should ZScore", func() {
+			zAdd := client.ZAdd("zset", &redis.Z{Score: 1.001, Member: "one"})
+			Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+			zScore := client.ZScore("zset", "one")
+			Expect(zScore.Err()).NotTo(HaveOccurred())
+			Expect(zScore.Val()).To(Equal(float64(1.001)))
+		})
+
+		It("should ZUnionStore", func() {
+			err := client.ZAdd("zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ZAdd("zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+			err = client.ZAdd("zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			n, err := client.ZUnionStore("out", &redis.ZStore{
+				Keys:    []string{"zset1", "zset2"},
+				Weights: []float64{2, 3},
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(3)))
+
+			val, err := client.ZRangeWithScores("out", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal([]redis.Z{{
+				Score:  5,
+				Member: "one",
+			}, {
+				Score:  9,
+				Member: "three",
+			}, {
+				Score:  10,
+				Member: "two",
+			}}))
+		})
+
+	})
+
+	Describe("streams", func() {
+		BeforeEach(func() {
+			id, err := client.XAdd(&redis.XAddArgs{
+				Stream: "stream",
+				ID:     "1-0",
+				Values: map[string]interface{}{"uno": "un"},
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(id).To(Equal("1-0"))
+
+			id, err = client.XAdd(&redis.XAddArgs{
+				Stream: "stream",
+				ID:     "2-0",
+				Values: map[string]interface{}{"dos": "deux"},
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(id).To(Equal("2-0"))
+
+			id, err = client.XAdd(&redis.XAddArgs{
+				Stream: "stream",
+				ID:     "3-0",
+				Values: map[string]interface{}{"tres": "troix"},
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(id).To(Equal("3-0"))
+		})
+
+		It("should XTrim", func() {
+			n, err := client.XTrim("stream", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(3)))
+		})
+
+		It("should XTrimApprox", func() {
+			n, err := client.XTrimApprox("stream", 0).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(3)))
+		})
+
+		It("should XAdd", func() {
+			id, err := client.XAdd(&redis.XAddArgs{
+				Stream: "stream",
+				Values: map[string]interface{}{"quatro": "quatre"},
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.XRange("stream", "-", "+").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.XMessage{
+				{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+				{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+				{ID: id, Values: map[string]interface{}{"quatro": "quatre"}},
+			}))
+		})
+
+		It("should XAdd with MaxLen", func() {
+			id, err := client.XAdd(&redis.XAddArgs{
+				Stream: "stream",
+				MaxLen: 1,
+				Values: map[string]interface{}{"quatro": "quatre"},
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+
+			vals, err := client.XRange("stream", "-", "+").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]redis.XMessage{
+				{ID: id, Values: map[string]interface{}{"quatro": "quatre"}},
+			}))
+		})
+
+		It("should XDel", func() {
+			n, err := client.XDel("stream", "1-0", "2-0", "3-0").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(3)))
+		})
+
+		It("should XLen", func() {
+			n, err := client.XLen("stream").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(3)))
+		})
+
+		It("should XRange", func() {
+			msgs, err := client.XRange("stream", "-", "+").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+				{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+			}))
+
+			msgs, err = client.XRange("stream", "2", "+").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+				{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+			}))
+
+			msgs, err = client.XRange("stream", "-", "2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+			}))
+		})
+
+		It("should XRangeN", func() {
+			msgs, err := client.XRangeN("stream", "-", "+", 2).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+			}))
+
+			msgs, err = client.XRangeN("stream", "2", "+", 1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+			}))
+
+			msgs, err = client.XRangeN("stream", "-", "2", 1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+			}))
+		})
+
+		It("should XRevRange", func() {
+			msgs, err := client.XRevRange("stream", "+", "-").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+				{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+			}))
+
+			msgs, err = client.XRevRange("stream", "+", "2").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+			}))
+		})
+
+		It("should XRevRangeN", func() {
+			msgs, err := client.XRevRangeN("stream", "+", "-", 2).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+				{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+			}))
+
+			msgs, err = client.XRevRangeN("stream", "+", "2", 1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(msgs).To(Equal([]redis.XMessage{
+				{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+			}))
+		})
+
+		It("should XRead", func() {
+			res, err := client.XReadStreams("stream", "0").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(Equal([]redis.XStream{{
+				Stream: "stream",
+				Messages: []redis.XMessage{
+					{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+					{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+					{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+				}},
+			}))
+
+			_, err = client.XReadStreams("stream", "3").Result()
+			Expect(err).To(Equal(redis.Nil))
+		})
+
+		It("should XRead", func() {
+			res, err := client.XRead(&redis.XReadArgs{
+				Streams: []string{"stream", "0"},
+				Count:   2,
+				Block:   100 * time.Millisecond,
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(Equal([]redis.XStream{{
+				Stream: "stream",
+				Messages: []redis.XMessage{
+					{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+					{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+				}},
+			}))
+
+			_, err = client.XRead(&redis.XReadArgs{
+				Streams: []string{"stream", "3"},
+				Count:   1,
+				Block:   100 * time.Millisecond,
+			}).Result()
+			Expect(err).To(Equal(redis.Nil))
+		})
+
+		Describe("group", func() {
+			BeforeEach(func() {
+				err := client.XGroupCreate("stream", "group", "0").Err()
+				Expect(err).NotTo(HaveOccurred())
+
+				res, err := client.XReadGroup(&redis.XReadGroupArgs{
+					Group:    "group",
+					Consumer: "consumer",
+					Streams:  []string{"stream", ">"},
+				}).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(res).To(Equal([]redis.XStream{{
+					Stream: "stream",
+					Messages: []redis.XMessage{
+						{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+						{ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+						{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+					}},
+				}))
+			})
+
+			AfterEach(func() {
+				n, err := client.XGroupDestroy("stream", "group").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(n).To(Equal(int64(1)))
+			})
+
+			It("should XReadGroup skip empty", func() {
+				n, err := client.XDel("stream", "2-0").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(n).To(Equal(int64(1)))
+
+				res, err := client.XReadGroup(&redis.XReadGroupArgs{
+					Group:    "group",
+					Consumer: "consumer",
+					Streams:  []string{"stream", "0"},
+				}).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(res).To(Equal([]redis.XStream{{
+					Stream: "stream",
+					Messages: []redis.XMessage{
+						{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+						{ID: "2-0", Values: nil},
+						{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+					}},
+				}))
+			})
+
+			It("should XGroupCreateMkStream", func() {
+				err := client.XGroupCreateMkStream("stream2", "group", "0").Err()
+				Expect(err).NotTo(HaveOccurred())
+
+				err = client.XGroupCreateMkStream("stream2", "group", "0").Err()
+				Expect(err).To(Equal(proto.RedisError("BUSYGROUP Consumer Group name already exists")))
+
+				n, err := client.XGroupDestroy("stream2", "group").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(n).To(Equal(int64(1)))
+
+				n, err = client.Del("stream2").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(n).To(Equal(int64(1)))
+			})
+
+			It("should XPending", func() {
+				info, err := client.XPending("stream", "group").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(info).To(Equal(&redis.XPending{
+					Count:     3,
+					Lower:     "1-0",
+					Higher:    "3-0",
+					Consumers: map[string]int64{"consumer": 3},
+				}))
+
+				infoExt, err := client.XPendingExt(&redis.XPendingExtArgs{
+					Stream:   "stream",
+					Group:    "group",
+					Start:    "-",
+					End:      "+",
+					Count:    10,
+					Consumer: "consumer",
+				}).Result()
+				Expect(err).NotTo(HaveOccurred())
+				for i := range infoExt {
+					infoExt[i].Idle = 0
+				}
+				Expect(infoExt).To(Equal([]redis.XPendingExt{
+					{ID: "1-0", Consumer: "consumer", Idle: 0, RetryCount: 1},
+					{ID: "2-0", Consumer: "consumer", Idle: 0, RetryCount: 1},
+					{ID: "3-0", Consumer: "consumer", Idle: 0, RetryCount: 1},
+				}))
+
+				n, err := client.XGroupDelConsumer("stream", "group", "consumer").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(n).To(Equal(int64(3)))
+			})
+
+			It("should XClaim", func() {
+				msgs, err := client.XClaim(&redis.XClaimArgs{
+					Stream:   "stream",
+					Group:    "group",
+					Consumer: "consumer",
+					Messages: []string{"1-0", "2-0", "3-0"},
+				}).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(msgs).To(Equal([]redis.XMessage{{
+					ID:     "1-0",
+					Values: map[string]interface{}{"uno": "un"},
+				}, {
+					ID:     "2-0",
+					Values: map[string]interface{}{"dos": "deux"},
+				}, {
+					ID:     "3-0",
+					Values: map[string]interface{}{"tres": "troix"},
+				}}))
+
+				ids, err := client.XClaimJustID(&redis.XClaimArgs{
+					Stream:   "stream",
+					Group:    "group",
+					Consumer: "consumer",
+					Messages: []string{"1-0", "2-0", "3-0"},
+				}).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(ids).To(Equal([]string{"1-0", "2-0", "3-0"}))
+			})
+
+			It("should XAck", func() {
+				n, err := client.XAck("stream", "group", "1-0", "2-0", "4-0").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(n).To(Equal(int64(2)))
+			})
+		})
+	})
+
+	Describe("Geo add and radius search", func() {
+		BeforeEach(func() {
+			n, err := client.GeoAdd(
+				"Sicily",
+				&redis.GeoLocation{Longitude: 13.361389, Latitude: 38.115556, Name: "Palermo"},
+				&redis.GeoLocation{Longitude: 15.087269, Latitude: 37.502669, Name: "Catania"},
+			).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+		})
+
+		It("should not add same geo location", func() {
+			geoAdd := client.GeoAdd(
+				"Sicily",
+				&redis.GeoLocation{Longitude: 13.361389, Latitude: 38.115556, Name: "Palermo"},
+			)
+			Expect(geoAdd.Err()).NotTo(HaveOccurred())
+			Expect(geoAdd.Val()).To(Equal(int64(0)))
+		})
+
+		It("should search geo radius", func() {
+			res, err := client.GeoRadius("Sicily", 15, 37, &redis.GeoRadiusQuery{
+				Radius: 200,
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(HaveLen(2))
+			Expect(res[0].Name).To(Equal("Palermo"))
+			Expect(res[1].Name).To(Equal("Catania"))
+		})
+
+		It("should geo radius and store the result", func() {
+			n, err := client.GeoRadiusStore("Sicily", 15, 37, &redis.GeoRadiusQuery{
+				Radius: 200,
+				Store:  "result",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+
+			res, err := client.ZRangeWithScores("result", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(ContainElement(redis.Z{
+				Score:  3.479099956230698e+15,
+				Member: "Palermo",
+			}))
+			Expect(res).To(ContainElement(redis.Z{
+				Score:  3.479447370796909e+15,
+				Member: "Catania",
+			}))
+		})
+
+		It("should geo radius and store dist", func() {
+			n, err := client.GeoRadiusStore("Sicily", 15, 37, &redis.GeoRadiusQuery{
+				Radius:    200,
+				StoreDist: "result",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(int64(2)))
+
+			res, err := client.ZRangeWithScores("result", 0, -1).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(ContainElement(redis.Z{
+				Score:  190.44242984775784,
+				Member: "Palermo",
+			}))
+			Expect(res).To(ContainElement(redis.Z{
+				Score:  56.4412578701582,
+				Member: "Catania",
+			}))
+		})
+
+		It("should search geo radius with options", func() {
+			res, err := client.GeoRadius("Sicily", 15, 37, &redis.GeoRadiusQuery{
+				Radius:      200,
+				Unit:        "km",
+				WithGeoHash: true,
+				WithCoord:   true,
+				WithDist:    true,
+				Count:       2,
+				Sort:        "ASC",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(HaveLen(2))
+			Expect(res[1].Name).To(Equal("Palermo"))
+			Expect(res[1].Dist).To(Equal(190.4424))
+			Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+			Expect(res[1].Longitude).To(Equal(13.361389338970184))
+			Expect(res[1].Latitude).To(Equal(38.115556395496299))
+			Expect(res[0].Name).To(Equal("Catania"))
+			Expect(res[0].Dist).To(Equal(56.4413))
+			Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+			Expect(res[0].Longitude).To(Equal(15.087267458438873))
+			Expect(res[0].Latitude).To(Equal(37.50266842333162))
+		})
+
+		It("should search geo radius with WithDist=false", func() {
+			res, err := client.GeoRadius("Sicily", 15, 37, &redis.GeoRadiusQuery{
+				Radius:      200,
+				Unit:        "km",
+				WithGeoHash: true,
+				WithCoord:   true,
+				Count:       2,
+				Sort:        "ASC",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(HaveLen(2))
+			Expect(res[1].Name).To(Equal("Palermo"))
+			Expect(res[1].Dist).To(Equal(float64(0)))
+			Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+			Expect(res[1].Longitude).To(Equal(13.361389338970184))
+			Expect(res[1].Latitude).To(Equal(38.115556395496299))
+			Expect(res[0].Name).To(Equal("Catania"))
+			Expect(res[0].Dist).To(Equal(float64(0)))
+			Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+			Expect(res[0].Longitude).To(Equal(15.087267458438873))
+			Expect(res[0].Latitude).To(Equal(37.50266842333162))
+		})
+
+		It("should search geo radius by member with options", func() {
+			res, err := client.GeoRadiusByMember("Sicily", "Catania", &redis.GeoRadiusQuery{
+				Radius:      200,
+				Unit:        "km",
+				WithGeoHash: true,
+				WithCoord:   true,
+				WithDist:    true,
+				Count:       2,
+				Sort:        "ASC",
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(HaveLen(2))
+			Expect(res[0].Name).To(Equal("Catania"))
+			Expect(res[0].Dist).To(Equal(0.0))
+			Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+			Expect(res[0].Longitude).To(Equal(15.087267458438873))
+			Expect(res[0].Latitude).To(Equal(37.50266842333162))
+			Expect(res[1].Name).To(Equal("Palermo"))
+			Expect(res[1].Dist).To(Equal(166.2742))
+			Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+			Expect(res[1].Longitude).To(Equal(13.361389338970184))
+			Expect(res[1].Latitude).To(Equal(38.115556395496299))
+		})
+
+		It("should search geo radius with no results", func() {
+			res, err := client.GeoRadius("Sicily", 99, 37, &redis.GeoRadiusQuery{
+				Radius:      200,
+				Unit:        "km",
+				WithGeoHash: true,
+				WithCoord:   true,
+				WithDist:    true,
+			}).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(res).To(HaveLen(0))
+		})
+
+		It("should get geo distance with unit options", func() {
+			// From the Redis CLI; note the difference in rounding between m and
+			// km on Redis itself.
+			//
+			// GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+			// GEODIST Sicily Palermo Catania m
+			// "166274.15156960033"
+			// GEODIST Sicily Palermo Catania km
+			// "166.27415156960032"
+			dist, err := client.GeoDist("Sicily", "Palermo", "Catania", "km").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(dist).To(BeNumerically("~", 166.27, 0.01))
+
+			dist, err = client.GeoDist("Sicily", "Palermo", "Catania", "m").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(dist).To(BeNumerically("~", 166274.15, 0.01))
+		})
+
+		It("should get geo hash in string representation", func() {
+			hashes, err := client.GeoHash("Sicily", "Palermo", "Catania").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(hashes).To(ConsistOf([]string{"sqc8b49rny0", "sqdtr74hyu0"}))
+		})
+
+		It("should return geo position", func() {
+			pos, err := client.GeoPos("Sicily", "Palermo", "Catania", "NonExisting").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pos).To(ConsistOf([]*redis.GeoPos{
+				{
+					Longitude: 13.361389338970184,
+					Latitude:  38.1155563954963,
+				},
+				{
+					Longitude: 15.087267458438873,
+					Latitude:  37.50266842333162,
+				},
+				nil,
+			}))
+		})
+	})
+
+	Describe("marshaling/unmarshaling", func() {
+
+		type convTest struct {
+			value  interface{}
+			wanted string
+			dest   interface{}
+		}
+
+		convTests := []convTest{
+			{nil, "", nil},
+			{"hello", "hello", new(string)},
+			{[]byte("hello"), "hello", new([]byte)},
+			{int(1), "1", new(int)},
+			{int8(1), "1", new(int8)},
+			{int16(1), "1", new(int16)},
+			{int32(1), "1", new(int32)},
+			{int64(1), "1", new(int64)},
+			{uint(1), "1", new(uint)},
+			{uint8(1), "1", new(uint8)},
+			{uint16(1), "1", new(uint16)},
+			{uint32(1), "1", new(uint32)},
+			{uint64(1), "1", new(uint64)},
+			{float32(1.0), "1", new(float32)},
+			{float64(1.0), "1", new(float64)},
+			{true, "1", new(bool)},
+			{false, "0", new(bool)},
+		}
+
+		It("should convert to string", func() {
+			for _, test := range convTests {
+				err := client.Set("key", test.value, 0).Err()
+				Expect(err).NotTo(HaveOccurred())
+
+				s, err := client.Get("key").Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(s).To(Equal(test.wanted))
+
+				if test.dest == nil {
+					continue
+				}
+
+				err = client.Get("key").Scan(test.dest)
+				Expect(err).NotTo(HaveOccurred())
+				Expect(deref(test.dest)).To(Equal(test.value))
+			}
+		})
+
+	})
+
+	Describe("json marshaling/unmarshaling", func() {
+
+		BeforeEach(func() {
+			value := &numberStruct{Number: 42}
+			err := client.Set("key", value, 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		It("should marshal custom values using json", func() {
+			s, err := client.Get("key").Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(s).To(Equal(`{"Number":42}`))
+		})
+
+		It("should scan custom values using json", func() {
+			value := &numberStruct{}
+			err := client.Get("key").Scan(value)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(value.Number).To(Equal(42))
+		})
+
+	})
+
+	Describe("Eval", func() {
+
+		It("returns keys and values", func() {
+			vals, err := client.Eval(
+				"return {KEYS[1],ARGV[1]}",
+				[]string{"key"},
+				"hello",
+			).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]interface{}{"key", "hello"}))
+		})
+
+		It("returns all values after an error", func() {
+			vals, err := client.Eval(
+				`return {12, {err="error"}, "abc"}`,
+				nil,
+			).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(vals).To(Equal([]interface{}{int64(12), proto.RedisError("error"), "abc"}))
+		})
+
+	})
+
+})
+
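+// numberStruct implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
+// using JSON, so the tests above can store it with Set and read it back with Scan.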
+type numberStruct struct {
+	Number int
+}
+
+func (s *numberStruct) MarshalBinary() ([]byte, error) {
+	return json.Marshal(s)
+}
+
+func (s *numberStruct) UnmarshalBinary(b []byte) error {
+	return json.Unmarshal(b, s)
+}
+
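+// deref follows pointers until it reaches a non-pointer value and returns that value.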
+func deref(viface interface{}) interface{} {
+	v := reflect.ValueOf(viface)
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	return v.Interface()
+}

+ 4 - 0
doc.go

@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis

+ 93 - 0
error.go

@@ -0,0 +1,93 @@
+package redis
+
+import (
+	"context"
+	"io"
+	"net"
+	"strings"
+
+	"github.com/go-redis/redis/v7/internal/proto"
+)
+
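+// isRetryableError reports whether a command that failed with err may be retried.
+// retryTimeout controls whether network timeouts are considered retryable.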
+func isRetryableError(err error, retryTimeout bool) bool {
+	switch err {
+	case nil, context.Canceled, context.DeadlineExceeded:
+		return false
+	case io.EOF:
+		return true
+	}
+	if netErr, ok := err.(net.Error); ok {
+		if netErr.Timeout() {
+			return retryTimeout
+		}
+		return true
+	}
+
+	s := err.Error()
+	if s == "ERR max number of clients reached" {
+		return true
+	}
+	if strings.HasPrefix(s, "LOADING ") {
+		return true
+	}
+	if strings.HasPrefix(s, "READONLY ") {
+		return true
+	}
+	if strings.HasPrefix(s, "CLUSTERDOWN ") {
+		return true
+	}
+	return false
+}
+
+func isRedisError(err error) bool {
+	_, ok := err.(proto.RedisError)
+	return ok
+}
+
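+// isBadConn reports whether err indicates a connection that should not be reused.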
+func isBadConn(err error, allowTimeout bool) bool {
+	if err == nil {
+		return false
+	}
+	if isRedisError(err) {
+		// Close connections in read-only state in case a domain addr is used
+		// and the domain resolves to a different Redis server. See #790.
+		return isReadOnlyError(err)
+	}
+	if allowTimeout {
+		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+			return false
+		}
+	}
+	return true
+}
+
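+// isMovedError parses a cluster redirection error. For example, for
+// "MOVED 3999 127.0.0.1:6381" it returns moved=true and addr="127.0.0.1:6381",
+// and for "ASK 3999 127.0.0.1:6381" it returns ask=true with the same addr.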
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+	if !isRedisError(err) {
+		return
+	}
+
+	s := err.Error()
+	switch {
+	case strings.HasPrefix(s, "MOVED "):
+		moved = true
+	case strings.HasPrefix(s, "ASK "):
+		ask = true
+	default:
+		return
+	}
+
+	ind := strings.LastIndex(s, " ")
+	if ind == -1 {
+		return false, false, ""
+	}
+	addr = s[ind+1:]
+	return
+}
+
+func isLoadingError(err error) bool {
+	return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+	return strings.HasPrefix(err.Error(), "READONLY ")
+}

+ 80 - 0
example_instrumentation_test.go

@@ -0,0 +1,80 @@
+package redis_test
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-redis/redis/v7"
+)
+
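+// redisHook is a minimal Hook implementation that logs every command and
+// pipeline before and after it is processed.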
+type redisHook struct{}
+
+var _ redis.Hook = redisHook{}
+
+func (redisHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+	fmt.Printf("starting processing: <%s>\n", cmd)
+	return ctx, nil
+}
+
+func (redisHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
+	fmt.Printf("finished processing: <%s>\n", cmd)
+	return nil
+}
+
+func (redisHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+	fmt.Printf("pipeline starting processing: %v\n", cmds)
+	return ctx, nil
+}
+
+func (redisHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
+	fmt.Printf("pipeline finished processing: %v\n", cmds)
+	return nil
+}
+
+func Example_instrumentation() {
+	rdb := redis.NewClient(&redis.Options{
+		Addr: ":6379",
+	})
+	rdb.AddHook(redisHook{})
+
+	rdb.Ping()
+	// Output: starting processing: <ping: >
+	// finished processing: <ping: PONG>
+}
+
+func ExamplePipeline_instrumentation() {
+	rdb := redis.NewClient(&redis.Options{
+		Addr: ":6379",
+	})
+	rdb.AddHook(redisHook{})
+
+	rdb.Pipelined(func(pipe redis.Pipeliner) error {
+		pipe.Ping()
+		pipe.Ping()
+		return nil
+	})
+	// Output: pipeline starting processing: [ping:  ping: ]
+	// pipeline finished processing: [ping: PONG ping: PONG]
+}
+
+func ExampleWatch_instrumentation() {
+	rdb := redis.NewClient(&redis.Options{
+		Addr: ":6379",
+	})
+	rdb.AddHook(redisHook{})
+
+	rdb.Watch(func(tx *redis.Tx) error {
+		tx.Ping()
+		tx.Ping()
+		return nil
+	}, "foo")
+	// Output:
+	// starting processing: <watch foo: >
+	// finished processing: <watch foo: OK>
+	// starting processing: <ping: >
+	// finished processing: <ping: PONG>
+	// starting processing: <ping: >
+	// finished processing: <ping: PONG>
+	// starting processing: <unwatch: >
+	// finished processing: <unwatch: OK>
+}

+ 501 - 0
example_test.go

@@ -0,0 +1,501 @@
+package redis_test
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+)
+
+var rdb *redis.Client
+
+func init() {
+	rdb = redis.NewClient(&redis.Options{
+		Addr:         ":6379",
+		DialTimeout:  10 * time.Second,
+		ReadTimeout:  30 * time.Second,
+		WriteTimeout: 30 * time.Second,
+		PoolSize:     10,
+		PoolTimeout:  30 * time.Second,
+	})
+}
+
+func ExampleNewClient() {
+	rdb := redis.NewClient(&redis.Options{
+		Addr:     "localhost:6379", // use default Addr
+		Password: "",               // no password set
+		DB:       0,                // use default DB
+	})
+
+	pong, err := rdb.Ping().Result()
+	fmt.Println(pong, err)
+	// Output: PONG <nil>
+}
+
+func ExampleParseURL() {
+	opt, err := redis.ParseURL("redis://:qwerty@localhost:6379/1")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("addr is", opt.Addr)
+	fmt.Println("db is", opt.DB)
+	fmt.Println("password is", opt.Password)
+
+	// Create a client as usual.
+	_ = redis.NewClient(opt)
+
+	// Output: addr is localhost:6379
+	// db is 1
+	// password is qwerty
+}
+
+func ExampleNewFailoverClient() {
+	// See http://redis.io/topics/sentinel for instructions on how to
+	// set up Redis Sentinel.
+	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
+		MasterName:    "master",
+		SentinelAddrs: []string{":26379"},
+	})
+	rdb.Ping()
+}
+
+func ExampleNewClusterClient() {
+	// See http://redis.io/topics/cluster-tutorial for instructions on
+	// how to set up Redis Cluster.
+	rdb := redis.NewClusterClient(&redis.ClusterOptions{
+		Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
+	})
+	rdb.Ping()
+}
+
+// The following example creates a cluster client for 2 master nodes and 2 slave
+// nodes without using cluster mode or Redis Sentinel.
+func ExampleNewClusterClient_manualSetup() {
+	// clusterSlots returns cluster slots information.
+	// It can use a service like ZooKeeper to maintain configuration information
+	// and ClusterClient.ReloadState to manually trigger state reloading.
+	clusterSlots := func() ([]redis.ClusterSlot, error) {
+		slots := []redis.ClusterSlot{
+			// First node with 1 master and 1 slave.
+			{
+				Start: 0,
+				End:   8191,
+				Nodes: []redis.ClusterNode{{
+					Addr: ":7000", // master
+				}, {
+					Addr: ":8000", // 1st slave
+				}},
+			},
+			// Second node with 1 master and 1 slave.
+			{
+				Start: 8192,
+				End:   16383,
+				Nodes: []redis.ClusterNode{{
+					Addr: ":7001", // master
+				}, {
+					Addr: ":8001", // 1st slave
+				}},
+			},
+		}
+		return slots, nil
+	}
+
+	rdb := redis.NewClusterClient(&redis.ClusterOptions{
+		ClusterSlots:  clusterSlots,
+		RouteRandomly: true,
+	})
+	rdb.Ping()
+
+	// ReloadState reloads cluster state. It calls ClusterSlots func
+	// to get cluster slots information.
+	err := rdb.ReloadState()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func ExampleNewRing() {
+	rdb := redis.NewRing(&redis.RingOptions{
+		Addrs: map[string]string{
+			"shard1": ":7000",
+			"shard2": ":7001",
+			"shard3": ":7002",
+		},
+	})
+	rdb.Ping()
+}
+
+func ExampleClient() {
+	err := rdb.Set("key", "value", 0).Err()
+	if err != nil {
+		panic(err)
+	}
+
+	val, err := rdb.Get("key").Result()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("key", val)
+
+	val2, err := rdb.Get("missing_key").Result()
+	if err == redis.Nil {
+		fmt.Println("missing_key does not exist")
+	} else if err != nil {
+		panic(err)
+	} else {
+		fmt.Println("missing_key", val2)
+	}
+	// Output: key value
+	// missing_key does not exist
+}
+
+func ExampleConn() {
+	conn := rdb.Conn()
+
+	err := conn.ClientSetName("foobar").Err()
+	if err != nil {
+		panic(err)
+	}
+
+	// Open other connections.
+	for i := 0; i < 10; i++ {
+		go rdb.Ping()
+	}
+
+	s, err := conn.ClientGetName().Result()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(s)
+	// Output: foobar
+}
+
+func ExampleClient_Set() {
+	// Last argument is expiration. Zero means the key has no
+	// expiration time.
+	err := rdb.Set("key", "value", 0).Err()
+	if err != nil {
+		panic(err)
+	}
+
+	// key2 will expire in an hour.
+	err = rdb.Set("key2", "value", time.Hour).Err()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func ExampleClient_Incr() {
+	result, err := rdb.Incr("counter").Result()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(result)
+	// Output: 1
+}
+
+func ExampleClient_BLPop() {
+	if err := rdb.RPush("queue", "message").Err(); err != nil {
+		panic(err)
+	}
+
+	// use `rdb.BLPop(0, "queue")` for infinite waiting time
+	result, err := rdb.BLPop(1*time.Second, "queue").Result()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(result[0], result[1])
+	// Output: queue message
+}
+
+func ExampleClient_Scan() {
+	rdb.FlushDB()
+	for i := 0; i < 33; i++ {
+		err := rdb.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+		if err != nil {
+			panic(err)
+		}
+	}
+
+	var cursor uint64
+	var n int
+	for {
+		var keys []string
+		var err error
+		keys, cursor, err = rdb.Scan(cursor, "key*", 10).Result()
+		if err != nil {
+			panic(err)
+		}
+		n += len(keys)
+		if cursor == 0 {
+			break
+		}
+	}
+
+	fmt.Printf("found %d keys\n", n)
+	// Output: found 33 keys
+}
+
+func ExampleClient_Pipelined() {
+	var incr *redis.IntCmd
+	_, err := rdb.Pipelined(func(pipe redis.Pipeliner) error {
+		incr = pipe.Incr("pipelined_counter")
+		pipe.Expire("pipelined_counter", time.Hour)
+		return nil
+	})
+	fmt.Println(incr.Val(), err)
+	// Output: 1 <nil>
+}
+
+func ExampleClient_Pipeline() {
+	pipe := rdb.Pipeline()
+
+	incr := pipe.Incr("pipeline_counter")
+	pipe.Expire("pipeline_counter", time.Hour)
+
+	// Execute
+	//
+	//     INCR pipeline_counter
+	//     EXPIRE pipeline_counter 3600
+	//
+	// using one rdb-server roundtrip.
+	_, err := pipe.Exec()
+	fmt.Println(incr.Val(), err)
+	// Output: 1 <nil>
+}
+
+func ExampleClient_TxPipelined() {
+	var incr *redis.IntCmd
+	_, err := rdb.TxPipelined(func(pipe redis.Pipeliner) error {
+		incr = pipe.Incr("tx_pipelined_counter")
+		pipe.Expire("tx_pipelined_counter", time.Hour)
+		return nil
+	})
+	fmt.Println(incr.Val(), err)
+	// Output: 1 <nil>
+}
+
+func ExampleClient_TxPipeline() {
+	pipe := rdb.TxPipeline()
+
+	incr := pipe.Incr("tx_pipeline_counter")
+	pipe.Expire("tx_pipeline_counter", time.Hour)
+
+	// Execute
+	//
+	//     MULTI
+	//     INCR tx_pipeline_counter
+	//     EXPIRE tx_pipeline_counter 3600
+	//     EXEC
+	//
+	// using one rdb-server roundtrip.
+	_, err := pipe.Exec()
+	fmt.Println(incr.Val(), err)
+	// Output: 1 <nil>
+}
+
+func ExampleClient_Watch() {
+	const routineCount = 100
+
+	// Transactionally increments key using GET and SET commands.
+	increment := func(key string) error {
+		txf := func(tx *redis.Tx) error {
+			// get current value or zero
+			n, err := tx.Get(key).Int()
+			if err != nil && err != redis.Nil {
+				return err
+			}
+
+			// actual operation (local in optimistic lock)
+			n++
+
+			// runs only if the watched keys remain unchanged
+			_, err = tx.TxPipelined(func(pipe redis.Pipeliner) error {
+				// pipe handles the error case
+				pipe.Set(key, n, 0)
+				return nil
+			})
+			return err
+		}
+
+		for retries := routineCount; retries > 0; retries-- {
+			err := rdb.Watch(txf, key)
+			if err != redis.TxFailedErr {
+				return err
+			}
+			// optimistic lock lost
+		}
+		return errors.New("increment reached maximum number of retries")
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(routineCount)
+	for i := 0; i < routineCount; i++ {
+		go func() {
+			defer wg.Done()
+
+			if err := increment("counter3"); err != nil {
+				fmt.Println("increment error:", err)
+			}
+		}()
+	}
+	wg.Wait()
+
+	n, err := rdb.Get("counter3").Int()
+	fmt.Println("ended with", n, err)
+	// Output: ended with 100 <nil>
+}
+
+func ExamplePubSub() {
+	pubsub := rdb.Subscribe("mychannel1")
+
+	// Wait for confirmation that subscription is created before publishing anything.
+	_, err := pubsub.Receive()
+	if err != nil {
+		panic(err)
+	}
+
+	// Go channel which receives messages.
+	ch := pubsub.Channel()
+
+	// Publish a message.
+	err = rdb.Publish("mychannel1", "hello").Err()
+	if err != nil {
+		panic(err)
+	}
+
+	time.AfterFunc(time.Second, func() {
+		// When pubsub is closed the channel is closed too.
+		_ = pubsub.Close()
+	})
+
+	// Consume messages.
+	for msg := range ch {
+		fmt.Println(msg.Channel, msg.Payload)
+	}
+
+	// Output: mychannel1 hello
+}
+
+func ExamplePubSub_Receive() {
+	pubsub := rdb.Subscribe("mychannel2")
+	defer pubsub.Close()
+
+	for i := 0; i < 2; i++ {
+		// ReceiveTimeout is a low-level API. Use ReceiveMessage instead.
+		msgi, err := pubsub.ReceiveTimeout(time.Second)
+		if err != nil {
+			break
+		}
+
+		switch msg := msgi.(type) {
+		case *redis.Subscription:
+			fmt.Println("subscribed to", msg.Channel)
+
+			_, err := rdb.Publish("mychannel2", "hello").Result()
+			if err != nil {
+				panic(err)
+			}
+		case *redis.Message:
+			fmt.Println("received", msg.Payload, "from", msg.Channel)
+		default:
+			panic("unreached")
+		}
+	}
+
+	// subscribed to mychannel2
+	// received hello from mychannel2
+}
+
+func ExampleScript() {
+	IncrByXX := redis.NewScript(`
+		if redis.call("GET", KEYS[1]) ~= false then
+			return redis.call("INCRBY", KEYS[1], ARGV[1])
+		end
+		return false
+	`)
+
+	n, err := IncrByXX.Run(rdb, []string{"xx_counter"}, 2).Result()
+	fmt.Println(n, err)
+
+	err = rdb.Set("xx_counter", "40", 0).Err()
+	if err != nil {
+		panic(err)
+	}
+
+	n, err = IncrByXX.Run(rdb, []string{"xx_counter"}, 2).Result()
+	fmt.Println(n, err)
+
+	// Output: <nil> redis: nil
+	// 42 <nil>
+}
+
+func Example_customCommand() {
+	Get := func(rdb *redis.Client, key string) *redis.StringCmd {
+		cmd := redis.NewStringCmd("get", key)
+		rdb.Process(cmd)
+		return cmd
+	}
+
+	v, err := Get(rdb, "key_does_not_exist").Result()
+	fmt.Printf("%q %s", v, err)
+	// Output: "" redis: nil
+}
+
+func Example_customCommand2() {
+	v, err := rdb.Do("get", "key_does_not_exist").String()
+	fmt.Printf("%q %s", v, err)
+	// Output: "" redis: nil
+}
+
+func ExampleScanIterator() {
+	iter := rdb.Scan(0, "", 0).Iterator()
+	for iter.Next() {
+		fmt.Println(iter.Val())
+	}
+	if err := iter.Err(); err != nil {
+		panic(err)
+	}
+}
+
+func ExampleScanCmd_Iterator() {
+	iter := rdb.Scan(0, "", 0).Iterator()
+	for iter.Next() {
+		fmt.Println(iter.Val())
+	}
+	if err := iter.Err(); err != nil {
+		panic(err)
+	}
+}
+
+func ExampleNewUniversalClient_simple() {
+	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
+		Addrs: []string{":6379"},
+	})
+	defer rdb.Close()
+
+	rdb.Ping()
+}
+
+func ExampleNewUniversalClient_failover() {
+	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
+		MasterName: "master",
+		Addrs:      []string{":26379"},
+	})
+	defer rdb.Close()
+
+	rdb.Ping()
+}
+
+func ExampleNewUniversalClient_cluster() {
+	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
+		Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
+	})
+	defer rdb.Close()
+
+	rdb.Ping()
+}

+ 82 - 0
export_test.go

@@ -0,0 +1,82 @@
+package redis
+
+import (
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/go-redis/redis/v7/internal/hashtag"
+	"github.com/go-redis/redis/v7/internal/pool"
+)
+
+func (c *baseClient) Pool() pool.Pooler {
+	return c.connPool
+}
+
+func (c *PubSub) SetNetConn(netConn net.Conn) {
+	c.cn = pool.NewConn(netConn)
+}
+
+func (c *ClusterClient) LoadState() (*clusterState, error) {
+	return c.loadState()
+}
+
+func (c *ClusterClient) SlotAddrs(slot int) []string {
+	state, err := c.state.Get()
+	if err != nil {
+		panic(err)
+	}
+
+	var addrs []string
+	for _, n := range state.slotNodes(slot) {
+		addrs = append(addrs, n.Client.getAddr())
+	}
+	return addrs
+}
+
+func (c *ClusterClient) Nodes(key string) ([]*clusterNode, error) {
+	state, err := c.state.Reload()
+	if err != nil {
+		return nil, err
+	}
+
+	slot := hashtag.Slot(key)
+	nodes := state.slotNodes(slot)
+	if len(nodes) != 2 {
+		return nil, fmt.Errorf("slot=%d does not have enough nodes: %v", slot, nodes)
+	}
+	return nodes, nil
+}
+
+func (c *ClusterClient) SwapNodes(key string) error {
+	nodes, err := c.Nodes(key)
+	if err != nil {
+		return err
+	}
+	nodes[0], nodes[1] = nodes[1], nodes[0]
+	return nil
+}
+
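+// IsConsistent reports whether the cluster state has at least 3 masters and
+// 3 slaves and each node reports the expected role via INFO replication.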
+func (state *clusterState) IsConsistent() bool {
+	if len(state.Masters) < 3 {
+		return false
+	}
+	for _, master := range state.Masters {
+		s := master.Client.Info("replication").Val()
+		if !strings.Contains(s, "role:master") {
+			return false
+		}
+	}
+
+	if len(state.Slaves) < 3 {
+		return false
+	}
+	for _, slave := range state.Slaves {
+		s := slave.Client.Info("replication").Val()
+		if !strings.Contains(s, "role:slave") {
+			return false
+		}
+	}
+
+	return true
+}

+ 16 - 0
go.mod

@@ -0,0 +1,16 @@
+module github.com/go-redis/redis/v7
+
+require (
+	github.com/go-redis/redis v6.15.7+incompatible
+	github.com/golang/protobuf v1.3.2 // indirect
+	github.com/kr/pretty v0.1.0 // indirect
+	github.com/onsi/ginkgo v1.10.1
+	github.com/onsi/gomega v1.7.0
+	golang.org/x/net v0.0.0-20190923162816-aa69164e4478 // indirect
+	golang.org/x/sys v0.0.0-20191010194322-b09406accb47 // indirect
+	golang.org/x/text v0.3.2 // indirect
+	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
+	gopkg.in/yaml.v2 v2.2.4 // indirect
+)
+
+go 1.11

+ 49 - 0
go.sum

@@ -0,0 +1,49 @@
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
+github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

+ 81 - 0
internal/consistenthash/consistenthash.go

@@ -0,0 +1,81 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consistenthash provides an implementation of a ring hash.
+package consistenthash
+
+import (
+	"hash/crc32"
+	"sort"
+	"strconv"
+)
+
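+// Hash is a hashing function that maps a byte slice to a uint32.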
+type Hash func(data []byte) uint32
+
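+// Map is a consistent hash ring that maps a key to one of the added shard keys.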
+type Map struct {
+	hash     Hash
+	replicas int
+	keys     []int // Sorted
+	hashMap  map[int]string
+}
+
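+// New creates a Map with the given number of virtual-node replicas per key.
+// If fn is nil, crc32.ChecksumIEEE is used as the hash function.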
+func New(replicas int, fn Hash) *Map {
+	m := &Map{
+		replicas: replicas,
+		hash:     fn,
+		hashMap:  make(map[int]string),
+	}
+	if m.hash == nil {
+		m.hash = crc32.ChecksumIEEE
+	}
+	return m
+}
+
+// IsEmpty returns true if there are no items available.
+func (m *Map) IsEmpty() bool {
+	return len(m.keys) == 0
+}
+
+// Add adds some keys to the hash.
+func (m *Map) Add(keys ...string) {
+	for _, key := range keys {
+		for i := 0; i < m.replicas; i++ {
+			hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
+			m.keys = append(m.keys, hash)
+			m.hashMap[hash] = key
+		}
+	}
+	sort.Ints(m.keys)
+}
+
+// Get returns the item in the hash closest to the provided key.
+func (m *Map) Get(key string) string {
+	if m.IsEmpty() {
+		return ""
+	}
+
+	hash := int(m.hash([]byte(key)))
+
+	// Binary search for appropriate replica.
+	idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
+
+	// Means we have cycled back to the first replica.
+	if idx == len(m.keys) {
+		idx = 0
+	}
+
+	return m.hashMap[m.keys[idx]]
+}

+ 110 - 0
internal/consistenthash/consistenthash_test.go

@@ -0,0 +1,110 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package consistenthash
+
+import (
+	"fmt"
+	"strconv"
+	"testing"
+)
+
+func TestHashing(t *testing.T) {
+
+	// Override the hash function to return easier-to-reason-about values.
+	// It assumes the keys can be converted to an integer.
+	hash := New(3, func(key []byte) uint32 {
+		i, err := strconv.Atoi(string(key))
+		if err != nil {
+			panic(err)
+		}
+		return uint32(i)
+	})
+
+	// Given the above hash function, this will give replicas with "hashes":
+	// 2, 4, 6, 12, 14, 16, 22, 24, 26
+	hash.Add("6", "4", "2")
+
+	testCases := map[string]string{
+		"2":  "2",
+		"11": "2",
+		"23": "4",
+		"27": "2",
+	}
+
+	for k, v := range testCases {
+		if hash.Get(k) != v {
+			t.Errorf("Asking for %s, should have yielded %s", k, v)
+		}
+	}
+
+	// Adds 8, 18, 28
+	hash.Add("8")
+
+	// 27 should now map to 8.
+	testCases["27"] = "8"
+
+	for k, v := range testCases {
+		if hash.Get(k) != v {
+			t.Errorf("Asking for %s, should have yielded %s", k, v)
+		}
+	}
+
+}
+
+func TestConsistency(t *testing.T) {
+	hash1 := New(1, nil)
+	hash2 := New(1, nil)
+
+	hash1.Add("Bill", "Bob", "Bonny")
+	hash2.Add("Bob", "Bonny", "Bill")
+
+	if hash1.Get("Ben") != hash2.Get("Ben") {
+		t.Errorf("Fetching 'Ben' from both hashes should be the same")
+	}
+
+	hash2.Add("Becky", "Ben", "Bobby")
+
+	if hash1.Get("Ben") != hash2.Get("Ben") ||
+		hash1.Get("Bob") != hash2.Get("Bob") ||
+		hash1.Get("Bonny") != hash2.Get("Bonny") {
+		t.Errorf("Direct matches should always return the same entry")
+	}
+
+}
+
+func BenchmarkGet8(b *testing.B)   { benchmarkGet(b, 8) }
+func BenchmarkGet32(b *testing.B)  { benchmarkGet(b, 32) }
+func BenchmarkGet128(b *testing.B) { benchmarkGet(b, 128) }
+func BenchmarkGet512(b *testing.B) { benchmarkGet(b, 512) }
+
+func benchmarkGet(b *testing.B, shards int) {
+
+	hash := New(50, nil)
+
+	var buckets []string
+	for i := 0; i < shards; i++ {
+		buckets = append(buckets, fmt.Sprintf("shard-%d", i))
+	}
+
+	hash.Add(buckets...)
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		hash.Get(buckets[i&(shards-1)])
+	}
+}

+ 77 - 0
internal/hashtag/hashtag.go

@@ -0,0 +1,77 @@
+package hashtag
+
+import (
+	"math/rand"
+	"strings"
+)
+
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+	if s := strings.IndexByte(key, '{'); s > -1 {
+		if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+			return key[s+1 : s+e+1]
+		}
+	}
+	return key
+}
+
+func RandomSlot() int {
+	return rand.Intn(slotNumber)
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+	if key == "" {
+		return RandomSlot()
+	}
+	key = Key(key)
+	return int(crc16sum(key)) % slotNumber
+}
+
+func crc16sum(key string) (crc uint16) {
+	for i := 0; i < len(key); i++ {
+		crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+	}
+	return
+}
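
A short sketch of how hash tags affect slot assignment, based on the functions above; the /v7/ import path and the key names are illustrative assumptions:

package hashtag_test

import (
	"fmt"

	"github.com/go-redis/redis/v7/internal/hashtag"
)

func ExampleSlot() {
	// Keys that share the same {hash tag} always map to the same cluster slot,
	// so they can be used together in multi-key commands.
	fmt.Println(hashtag.Slot("{user1000}.following") == hashtag.Slot("{user1000}.followers"))

	// Key extracts the part of the key that actually gets hashed.
	fmt.Println(hashtag.Key("foo{bar}baz"))

	// Output:
	// true
	// bar
}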

+ 74 - 0
internal/hashtag/hashtag_test.go

@@ -0,0 +1,74 @@
+package hashtag
+
+import (
+	"math/rand"
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "hashtag")
+}
+
+var _ = Describe("CRC16", func() {
+
+	// http://redis.io/topics/cluster-spec#keys-distribution-model
+	It("should calculate CRC16", func() {
+		tests := []struct {
+			s string
+			n uint16
+		}{
+			{"123456789", 0x31C3},
+			{string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 21847},
+		}
+
+		for _, test := range tests {
+			Expect(crc16sum(test.s)).To(Equal(test.n), "for %s", test.s)
+		}
+	})
+
+})
+
+var _ = Describe("HashSlot", func() {
+
+	It("should calculate hash slots", func() {
+		tests := []struct {
+			key  string
+			slot int
+		}{
+			{"123456789", 12739},
+			{"{}foo", 9500},
+			{"foo{}", 5542},
+			{"foo{}{bar}", 8363},
+			{"", 10503},
+			{"", 5176},
+			{string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 5463},
+		}
+		// Empty keys receive random slot.
+		rand.Seed(100)
+
+		for _, test := range tests {
+			Expect(Slot(test.key)).To(Equal(test.slot), "for %s", test.key)
+		}
+	})
+
+	It("should extract keys from tags", func() {
+		tests := []struct {
+			one, two string
+		}{
+			{"foo{bar}", "bar"},
+			{"{foo}bar", "foo"},
+			{"{user1000}.following", "{user1000}.followers"},
+			{"foo{{bar}}zap", "{bar"},
+			{"foo{bar}{zap}", "bar"},
+		}
+
+		for _, test := range tests {
+			Expect(Slot(test.one)).To(Equal(Slot(test.two)), "for %s <-> %s", test.one, test.two)
+		}
+	})
+
+})

+ 24 - 0
internal/internal.go

@@ -0,0 +1,24 @@
+package internal
+
+import (
+	"math/rand"
+	"time"
+)
+
+// RetryBackoff returns a backoff duration with jitter to avoid overload during retry bursts.
+// https://www.awsarchitectureblog.com/2015/03/backoff.html
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+	if retry < 0 {
+		retry = 0
+	}
+
+	backoff := minBackoff << uint(retry)
+	if backoff > maxBackoff || backoff < minBackoff {
+		backoff = maxBackoff
+	}
+
+	if backoff == 0 {
+		return 0
+	}
+	return time.Duration(rand.Int63n(int64(backoff)))
+}
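
Roughly how the backoff grows, as a sketch; the output is non-deterministic because of the jitter, and the /v7/ import path is assumed:

package internal_test

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7/internal"
)

func ExampleRetryBackoff() {
	// The cap doubles with each retry, starting at minBackoff and bounded by
	// maxBackoff; the returned duration is a random value below that cap.
	for retry := 0; retry < 4; retry++ {
		d := internal.RetryBackoff(retry, 8*time.Millisecond, 512*time.Millisecond)
		fmt.Printf("retry=%d cap=%v got=%v\n", retry, (8*time.Millisecond)<<uint(retry), d)
	}
}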

+ 18 - 0
internal/internal_test.go

@@ -0,0 +1,18 @@
+package internal
+
+import (
+	"testing"
+	"time"
+
+	. "github.com/onsi/gomega"
+)
+
+func TestRetryBackoff(t *testing.T) {
+	RegisterTestingT(t)
+
+	for i := -1; i <= 16; i++ {
+		backoff := RetryBackoff(i, time.Millisecond, 512*time.Millisecond)
+		Expect(backoff >= 0).To(BeTrue())
+		Expect(backoff <= 512*time.Millisecond).To(BeTrue())
+	}
+}

+ 8 - 0
internal/log.go

@@ -0,0 +1,8 @@
+package internal
+
+import (
+	"log"
+	"os"
+)
+
+var Logger = log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile)

+ 60 - 0
internal/once.go

@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+	m    sync.Mutex
+	done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once.  In other words, given
+// 	var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error.  A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once.  Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+// 	err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+	if atomic.LoadUint32(&o.done) == 1 {
+		return nil
+	}
+	// Slow-path.
+	o.m.Lock()
+	defer o.m.Unlock()
+	var err error
+	if o.done == 0 {
+		err = f()
+		if err == nil {
+			atomic.StoreUint32(&o.done, 1)
+		}
+	}
+	return err
+}
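
A sketch of the re-arm-on-error behavior that distinguishes this Once from sync.Once; the counter and error are made up for illustration, and the /v7/ import path is assumed:

package internal_test

import (
	"errors"
	"fmt"

	"github.com/go-redis/redis/v7/internal"
)

func ExampleOnce() {
	var once internal.Once
	attempts := 0

	initFn := func() error {
		attempts++
		if attempts < 2 {
			return errors.New("temporary failure") // Once stays re-armed
		}
		return nil
	}

	_ = once.Do(initFn) // fails, so Do can be retried
	_ = once.Do(initFn) // succeeds, marks the Once as done
	_ = once.Do(initFn) // no-op from now on

	fmt.Println(attempts)
	// Output: 2
}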

+ 94 - 0
internal/pool/bench_test.go

@@ -0,0 +1,94 @@
+package pool_test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redis/v7/internal/pool"
+)
+
+type poolGetPutBenchmark struct {
+	poolSize int
+}
+
+func (bm poolGetPutBenchmark) String() string {
+	return fmt.Sprintf("pool=%d", bm.poolSize)
+}
+
+func BenchmarkPoolGetPut(b *testing.B) {
+	benchmarks := []poolGetPutBenchmark{
+		{1},
+		{2},
+		{8},
+		{32},
+		{64},
+		{128},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.String(), func(b *testing.B) {
+			connPool := pool.NewConnPool(&pool.Options{
+				Dialer:             dummyDialer,
+				PoolSize:           bm.poolSize,
+				PoolTimeout:        time.Second,
+				IdleTimeout:        time.Hour,
+				IdleCheckFrequency: time.Hour,
+			})
+
+			b.ResetTimer()
+
+			b.RunParallel(func(pb *testing.PB) {
+				for pb.Next() {
+					cn, err := connPool.Get(context.Background())
+					if err != nil {
+						b.Fatal(err)
+					}
+					connPool.Put(cn)
+				}
+			})
+		})
+	}
+}
+
+type poolGetRemoveBenchmark struct {
+	poolSize int
+}
+
+func (bm poolGetRemoveBenchmark) String() string {
+	return fmt.Sprintf("pool=%d", bm.poolSize)
+}
+
+func BenchmarkPoolGetRemove(b *testing.B) {
+	benchmarks := []poolGetRemoveBenchmark{
+		{1},
+		{2},
+		{8},
+		{32},
+		{64},
+		{128},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.String(), func(b *testing.B) {
+			connPool := pool.NewConnPool(&pool.Options{
+				Dialer:             dummyDialer,
+				PoolSize:           bm.poolSize,
+				PoolTimeout:        time.Second,
+				IdleTimeout:        time.Hour,
+				IdleCheckFrequency: time.Hour,
+			})
+
+			b.ResetTimer()
+
+			b.RunParallel(func(pb *testing.PB) {
+				for pb.Next() {
+					cn, err := connPool.Get(context.Background())
+					if err != nil {
+						b.Fatal(err)
+					}
+					connPool.Remove(cn, nil)
+				}
+			})
+		})
+	}
+}

+ 118 - 0
internal/pool/conn.go

@@ -0,0 +1,118 @@
+package pool
+
+import (
+	"context"
+	"net"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+	netConn net.Conn
+
+	rd *proto.Reader
+	wr *proto.Writer
+
+	Inited    bool
+	pooled    bool
+	createdAt time.Time
+	usedAt    int64 // atomic
+}
+
+func NewConn(netConn net.Conn) *Conn {
+	cn := &Conn{
+		netConn:   netConn,
+		createdAt: time.Now(),
+	}
+	cn.rd = proto.NewReader(netConn)
+	cn.wr = proto.NewWriter(netConn)
+	cn.SetUsedAt(time.Now())
+	return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+	unix := atomic.LoadInt64(&cn.usedAt)
+	return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+	atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+	cn.netConn = netConn
+	cn.rd.Reset(netConn)
+	cn.wr.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+	return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+	return cn.netConn.RemoteAddr()
+}
+
+func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
+	err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout))
+	if err != nil {
+		return err
+	}
+	return fn(cn.rd)
+}
+
+func (cn *Conn) WithWriter(
+	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+	err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout))
+	if err != nil {
+		return err
+	}
+
+	if cn.wr.Buffered() > 0 {
+		cn.wr.Reset(cn.netConn)
+	}
+
+	err = fn(cn.wr)
+	if err != nil {
+		return err
+	}
+
+	return cn.wr.Flush()
+}
+
+func (cn *Conn) Close() error {
+	return cn.netConn.Close()
+}
+
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+	tm := time.Now()
+	cn.SetUsedAt(tm)
+
+	if timeout > 0 {
+		tm = tm.Add(timeout)
+	}
+
+	if ctx != nil {
+		deadline, ok := ctx.Deadline()
+		if ok {
+			if timeout == 0 {
+				return deadline
+			}
+			if deadline.Before(tm) {
+				return deadline
+			}
+			return tm
+		}
+	}
+
+	if timeout > 0 {
+		return tm
+	}
+
+	return noDeadline
+}

+ 7 - 0
internal/pool/export_test.go

@@ -0,0 +1,7 @@
+package pool
+
+import "time"
+
+func (cn *Conn) SetCreatedAt(tm time.Time) {
+	cn.createdAt = tm
+}

+ 36 - 0
internal/pool/main_test.go

@@ -0,0 +1,36 @@
+package pool_test
+
+import (
+	"context"
+	"net"
+	"sync"
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "pool")
+}
+
+func perform(n int, cbs ...func(int)) {
+	var wg sync.WaitGroup
+	for _, cb := range cbs {
+		for i := 0; i < n; i++ {
+			wg.Add(1)
+			go func(cb func(int), i int) {
+				defer GinkgoRecover()
+				defer wg.Done()
+
+				cb(i)
+			}(cb, i)
+		}
+	}
+	wg.Wait()
+}
+
+func dummyDialer(context.Context) (net.Conn, error) {
+	return &net.TCPConn{}, nil
+}

+ 515 - 0
internal/pool/pool.go

@@ -0,0 +1,515 @@
+package pool
+
+import (
+	"context"
+	"errors"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/internal"
+)
+
+var ErrClosed = errors.New("redis: client is closed")
+var ErrPoolTimeout = errors.New("redis: connection pool timeout")
+
+var timers = sync.Pool{
+	New: func() interface{} {
+		t := time.NewTimer(time.Hour)
+		t.Stop()
+		return t
+	},
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+	Hits     uint32 // number of times a free connection was found in the pool
+	Misses   uint32 // number of times a free connection was NOT found in the pool
+	Timeouts uint32 // number of times a wait timeout occurred
+
+	TotalConns uint32 // number of total connections in the pool
+	IdleConns  uint32 // number of idle connections in the pool
+	StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+	NewConn(context.Context) (*Conn, error)
+	CloseConn(*Conn) error
+
+	Get(context.Context) (*Conn, error)
+	Put(*Conn)
+	Remove(*Conn, error)
+
+	Len() int
+	IdleLen() int
+	Stats() *Stats
+
+	Close() error
+}
+
+type Options struct {
+	Dialer  func(context.Context) (net.Conn, error)
+	OnClose func(*Conn) error
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+type ConnPool struct {
+	opt *Options
+
+	dialErrorsNum uint32 // atomic
+
+	lastDialErrorMu sync.RWMutex
+	lastDialError   error
+
+	queue chan struct{}
+
+	connsMu      sync.Mutex
+	conns        []*Conn
+	idleConns    []*Conn
+	poolSize     int
+	idleConnsLen int
+
+	stats Stats
+
+	_closed  uint32 // atomic
+	closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+	p := &ConnPool{
+		opt: opt,
+
+		queue:     make(chan struct{}, opt.PoolSize),
+		conns:     make([]*Conn, 0, opt.PoolSize),
+		idleConns: make([]*Conn, 0, opt.PoolSize),
+		closedCh:  make(chan struct{}),
+	}
+
+	p.checkMinIdleConns()
+
+	if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+		go p.reaper(opt.IdleCheckFrequency)
+	}
+
+	return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+	if p.opt.MinIdleConns == 0 {
+		return
+	}
+	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+		p.poolSize++
+		p.idleConnsLen++
+		go func() {
+			err := p.addIdleConn()
+			if err != nil {
+				p.connsMu.Lock()
+				p.poolSize--
+				p.idleConnsLen--
+				p.connsMu.Unlock()
+			}
+		}()
+	}
+}
+
+func (p *ConnPool) addIdleConn() error {
+	cn, err := p.dialConn(context.TODO(), true)
+	if err != nil {
+		return err
+	}
+
+	p.connsMu.Lock()
+	p.conns = append(p.conns, cn)
+	p.idleConns = append(p.idleConns, cn)
+	p.connsMu.Unlock()
+	return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+	cn, err := p.dialConn(ctx, pooled)
+	if err != nil {
+		return nil, err
+	}
+
+	p.connsMu.Lock()
+	p.conns = append(p.conns, cn)
+	if pooled {
+		// If the pool is full, remove the cn on the next Put.
+		if p.poolSize >= p.opt.PoolSize {
+			cn.pooled = false
+		} else {
+			p.poolSize++
+		}
+	}
+	p.connsMu.Unlock()
+	return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+
+	if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+		return nil, p.getLastDialError()
+	}
+
+	netConn, err := p.opt.Dialer(ctx)
+	if err != nil {
+		p.setLastDialError(err)
+		if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+			go p.tryDial()
+		}
+		return nil, err
+	}
+
+	cn := NewConn(netConn)
+	cn.pooled = pooled
+	return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+	for {
+		if p.closed() {
+			return
+		}
+
+		conn, err := p.opt.Dialer(context.Background())
+		if err != nil {
+			p.setLastDialError(err)
+			time.Sleep(time.Second)
+			continue
+		}
+
+		atomic.StoreUint32(&p.dialErrorsNum, 0)
+		_ = conn.Close()
+		return
+	}
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+	p.lastDialErrorMu.Lock()
+	p.lastDialError = err
+	p.lastDialErrorMu.Unlock()
+}
+
+func (p *ConnPool) getLastDialError() error {
+	p.lastDialErrorMu.RLock()
+	err := p.lastDialError
+	p.lastDialErrorMu.RUnlock()
+	return err
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+
+	err := p.waitTurn(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	for {
+		p.connsMu.Lock()
+		cn := p.popIdle()
+		p.connsMu.Unlock()
+
+		if cn == nil {
+			break
+		}
+
+		if p.isStaleConn(cn) {
+			_ = p.CloseConn(cn)
+			continue
+		}
+
+		atomic.AddUint32(&p.stats.Hits, 1)
+		return cn, nil
+	}
+
+	atomic.AddUint32(&p.stats.Misses, 1)
+
+	newcn, err := p.newConn(ctx, true)
+	if err != nil {
+		p.freeTurn()
+		return nil, err
+	}
+
+	return newcn, nil
+}
+
+func (p *ConnPool) getTurn() {
+	p.queue <- struct{}{}
+}
+
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	select {
+	case p.queue <- struct{}{}:
+		return nil
+	default:
+	}
+
+	timer := timers.Get().(*time.Timer)
+	timer.Reset(p.opt.PoolTimeout)
+
+	select {
+	case <-ctx.Done():
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+		return ctx.Err()
+	case p.queue <- struct{}{}:
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+		return nil
+	case <-timer.C:
+		timers.Put(timer)
+		atomic.AddUint32(&p.stats.Timeouts, 1)
+		return ErrPoolTimeout
+	}
+}
+
+func (p *ConnPool) freeTurn() {
+	<-p.queue
+}
+
+func (p *ConnPool) popIdle() *Conn {
+	if len(p.idleConns) == 0 {
+		return nil
+	}
+
+	idx := len(p.idleConns) - 1
+	cn := p.idleConns[idx]
+	p.idleConns = p.idleConns[:idx]
+	p.idleConnsLen--
+	p.checkMinIdleConns()
+	return cn
+}
+
+func (p *ConnPool) Put(cn *Conn) {
+	if cn.rd.Buffered() > 0 {
+		internal.Logger.Printf("Conn has unread data")
+		p.Remove(cn, BadConnError{})
+		return
+	}
+
+	if !cn.pooled {
+		p.Remove(cn, nil)
+		return
+	}
+
+	p.connsMu.Lock()
+	p.idleConns = append(p.idleConns, cn)
+	p.idleConnsLen++
+	p.connsMu.Unlock()
+	p.freeTurn()
+}
+
+func (p *ConnPool) Remove(cn *Conn, reason error) {
+	p.removeConnWithLock(cn)
+	p.freeTurn()
+	_ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+	p.removeConnWithLock(cn)
+	return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+	p.connsMu.Lock()
+	p.removeConn(cn)
+	p.connsMu.Unlock()
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+	for i, c := range p.conns {
+		if c == cn {
+			p.conns = append(p.conns[:i], p.conns[i+1:]...)
+			if cn.pooled {
+				p.poolSize--
+				p.checkMinIdleConns()
+			}
+			return
+		}
+	}
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+	if p.opt.OnClose != nil {
+		_ = p.opt.OnClose(cn)
+	}
+	return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+	p.connsMu.Lock()
+	n := len(p.conns)
+	p.connsMu.Unlock()
+	return n
+}
+
+// IdleLen returns number of idle connections.
+func (p *ConnPool) IdleLen() int {
+	p.connsMu.Lock()
+	n := p.idleConnsLen
+	p.connsMu.Unlock()
+	return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+	idleLen := p.IdleLen()
+	return &Stats{
+		Hits:     atomic.LoadUint32(&p.stats.Hits),
+		Misses:   atomic.LoadUint32(&p.stats.Misses),
+		Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
+		TotalConns: uint32(p.Len()),
+		IdleConns:  uint32(idleLen),
+		StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+	}
+}
+
+func (p *ConnPool) closed() bool {
+	return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+	var firstErr error
+	p.connsMu.Lock()
+	for _, cn := range p.conns {
+		if fn(cn) {
+			if err := p.closeConn(cn); err != nil && firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+	p.connsMu.Unlock()
+	return firstErr
+}
+
+func (p *ConnPool) Close() error {
+	if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+		return ErrClosed
+	}
+	close(p.closedCh)
+
+	var firstErr error
+	p.connsMu.Lock()
+	for _, cn := range p.conns {
+		if err := p.closeConn(cn); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	p.conns = nil
+	p.poolSize = 0
+	p.idleConns = nil
+	p.idleConnsLen = 0
+	p.connsMu.Unlock()
+
+	return firstErr
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			// The ticker and closedCh can fire at the same time, and select
+			// picks a case pseudo-randomly, so double-check here to avoid
+			// reaping after the pool has been closed.
+			if p.closed() {
+				return
+			}
+			_, err := p.ReapStaleConns()
+			if err != nil {
+				internal.Logger.Printf("ReapStaleConns failed: %s", err)
+				continue
+			}
+		case <-p.closedCh:
+			return
+		}
+	}
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+	var n int
+	for {
+		p.getTurn()
+
+		p.connsMu.Lock()
+		cn := p.reapStaleConn()
+		p.connsMu.Unlock()
+		p.freeTurn()
+
+		if cn != nil {
+			_ = p.closeConn(cn)
+			n++
+		} else {
+			break
+		}
+	}
+	atomic.AddUint32(&p.stats.StaleConns, uint32(n))
+	return n, nil
+}
+
+func (p *ConnPool) reapStaleConn() *Conn {
+	if len(p.idleConns) == 0 {
+		return nil
+	}
+
+	cn := p.idleConns[0]
+	if !p.isStaleConn(cn) {
+		return nil
+	}
+
+	p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+	p.idleConnsLen--
+	p.removeConn(cn)
+
+	return cn
+}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+	if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+		return false
+	}
+
+	now := time.Now()
+	if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+		return true
+	}
+	if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
+		return true
+	}
+
+	return false
+}
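
A compact sketch of how the ConnPool above is typically wired up; the dialer address, the option values, and the /v7/ import path are assumptions for illustration (in the library the higher-level clients do this internally):

package pool_test

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/go-redis/redis/v7/internal/pool"
)

func ExampleConnPool() {
	p := pool.NewConnPool(&pool.Options{
		// Any dialer works; a plain TCP dial is shown for illustration.
		Dialer: func(ctx context.Context) (net.Conn, error) {
			return net.Dial("tcp", "127.0.0.1:6379")
		},
		PoolSize:           10,
		MinIdleConns:       2,
		PoolTimeout:        time.Second,
		IdleTimeout:        5 * time.Minute,
		IdleCheckFrequency: time.Minute,
	})
	defer p.Close()

	cn, err := p.Get(context.Background())
	if err != nil {
		panic(err)
	}
	// ... use cn.WithWriter / cn.WithReader here ...
	p.Put(cn) // return it; p.Remove(cn, err) would discard it instead

	fmt.Println(p.Stats().TotalConns >= 1)
}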

+ 208 - 0
internal/pool/pool_single.go

@@ -0,0 +1,208 @@
+package pool
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+)
+
+const (
+	stateDefault = 0
+	stateInited  = 1
+	stateClosed  = 2
+)
+
+type BadConnError struct {
+	wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+	s := "redis: Conn is in a bad state"
+	if e.wrapped != nil {
+		s += ": " + e.wrapped.Error()
+	}
+	return s
+}
+
+func (e BadConnError) Unwrap() error {
+	return e.wrapped
+}
+
+type SingleConnPool struct {
+	pool  Pooler
+	level int32 // atomic
+
+	state uint32 // atomic
+	ch    chan *Conn
+
+	_badConnError atomic.Value
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler) *SingleConnPool {
+	p, ok := pool.(*SingleConnPool)
+	if !ok {
+		p = &SingleConnPool{
+			pool: pool,
+			ch:   make(chan *Conn, 1),
+		}
+	}
+	atomic.AddInt32(&p.level, 1)
+	return p
+}
+
+func (p *SingleConnPool) SetConn(cn *Conn) {
+	if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+		p.ch <- cn
+	} else {
+		panic("not reached")
+	}
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+	return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+	// In the worst case this races with Close, which is not a very common operation.
+	for i := 0; i < 1000; i++ {
+		switch atomic.LoadUint32(&p.state) {
+		case stateDefault:
+			cn, err := p.pool.Get(ctx)
+			if err != nil {
+				return nil, err
+			}
+			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+				return cn, nil
+			}
+			p.pool.Remove(cn, ErrClosed)
+		case stateInited:
+			if err := p.badConnError(); err != nil {
+				return nil, err
+			}
+			cn, ok := <-p.ch
+			if !ok {
+				return nil, ErrClosed
+			}
+			return cn, nil
+		case stateClosed:
+			return nil, ErrClosed
+		default:
+			panic("not reached")
+		}
+	}
+	return nil, fmt.Errorf("redis: SingleConnPool.Get: infinite loop")
+}
+
+func (p *SingleConnPool) Put(cn *Conn) {
+	defer func() {
+		if recover() != nil {
+			p.freeConn(cn)
+		}
+	}()
+	p.ch <- cn
+}
+
+func (p *SingleConnPool) freeConn(cn *Conn) {
+	if err := p.badConnError(); err != nil {
+		p.pool.Remove(cn, err)
+	} else {
+		p.pool.Put(cn)
+	}
+}
+
+func (p *SingleConnPool) Remove(cn *Conn, reason error) {
+	defer func() {
+		if recover() != nil {
+			p.pool.Remove(cn, ErrClosed)
+		}
+	}()
+	p._badConnError.Store(BadConnError{wrapped: reason})
+	p.ch <- cn
+}
+
+func (p *SingleConnPool) Len() int {
+	switch atomic.LoadUint32(&p.state) {
+	case stateDefault:
+		return 0
+	case stateInited:
+		return 1
+	case stateClosed:
+		return 0
+	default:
+		panic("not reached")
+	}
+}
+
+func (p *SingleConnPool) IdleLen() int {
+	return len(p.ch)
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+	return &Stats{}
+}
+
+func (p *SingleConnPool) Close() error {
+	level := atomic.AddInt32(&p.level, -1)
+	if level > 0 {
+		return nil
+	}
+
+	for i := 0; i < 1000; i++ {
+		state := atomic.LoadUint32(&p.state)
+		if state == stateClosed {
+			return ErrClosed
+		}
+		if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+			close(p.ch)
+			cn, ok := <-p.ch
+			if ok {
+				p.freeConn(cn)
+			}
+			return nil
+		}
+	}
+
+	return fmt.Errorf("redis: SingleConnPool.Close: infinite loop")
+}
+
+func (p *SingleConnPool) Reset() error {
+	if p.badConnError() == nil {
+		return nil
+	}
+
+	select {
+	case cn, ok := <-p.ch:
+		if !ok {
+			return ErrClosed
+		}
+		p.pool.Remove(cn, ErrClosed)
+		p._badConnError.Store(BadConnError{wrapped: nil})
+	default:
+		return fmt.Errorf("redis: SingleConnPool does not have a Conn")
+	}
+
+	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+		state := atomic.LoadUint32(&p.state)
+		return fmt.Errorf("redis: invalid SingleConnPool state: %d", state)
+	}
+
+	return nil
+}
+
+func (p *SingleConnPool) badConnError() error {
+	if v := p._badConnError.Load(); v != nil {
+		err := v.(BadConnError)
+		if err.wrapped != nil {
+			return err
+		}
+	}
+	return nil
+}

+ 112 - 0
internal/pool/pool_sticky.go

@@ -0,0 +1,112 @@
+package pool
+
+import (
+	"context"
+	"sync"
+)
+
+type StickyConnPool struct {
+	pool     *ConnPool
+	reusable bool
+
+	cn     *Conn
+	closed bool
+	mu     sync.Mutex
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
+	return &StickyConnPool{
+		pool:     pool,
+		reusable: reusable,
+	}
+}
+
+func (p *StickyConnPool) NewConn(context.Context) (*Conn, error) {
+	panic("not implemented")
+}
+
+func (p *StickyConnPool) CloseConn(*Conn) error {
+	panic("not implemented")
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return nil, ErrClosed
+	}
+	if p.cn != nil {
+		return p.cn, nil
+	}
+
+	cn, err := p.pool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	p.cn = cn
+	return cn, nil
+}
+
+func (p *StickyConnPool) putUpstream() {
+	p.pool.Put(p.cn)
+	p.cn = nil
+}
+
+func (p *StickyConnPool) Put(cn *Conn) {}
+
+func (p *StickyConnPool) removeUpstream(reason error) {
+	p.pool.Remove(p.cn, reason)
+	p.cn = nil
+}
+
+func (p *StickyConnPool) Remove(cn *Conn, reason error) {
+	p.removeUpstream(reason)
+}
+
+func (p *StickyConnPool) Len() int {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.cn == nil {
+		return 0
+	}
+	return 1
+}
+
+func (p *StickyConnPool) IdleLen() int {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.cn == nil {
+		return 1
+	}
+	return 0
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+	return nil
+}
+
+func (p *StickyConnPool) Close() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return ErrClosed
+	}
+	p.closed = true
+
+	if p.cn != nil {
+		if p.reusable {
+			p.putUpstream()
+		} else {
+			p.removeUpstream(ErrClosed)
+		}
+	}
+
+	return nil
+}

+ 421 - 0
internal/pool/pool_test.go

@@ -0,0 +1,421 @@
+package pool_test
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redis/v7/internal/pool"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("ConnPool", func() {
+	c := context.Background()
+	var connPool *pool.ConnPool
+
+	BeforeEach(func() {
+		connPool = pool.NewConnPool(&pool.Options{
+			Dialer:             dummyDialer,
+			PoolSize:           10,
+			PoolTimeout:        time.Hour,
+			IdleTimeout:        time.Millisecond,
+			IdleCheckFrequency: time.Millisecond,
+		})
+	})
+
+	AfterEach(func() {
+		connPool.Close()
+	})
+
+	It("should unblock client when conn is removed", func() {
+		// Reserve one connection.
+		cn, err := connPool.Get(c)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Reserve all other connections.
+		var cns []*pool.Conn
+		for i := 0; i < 9; i++ {
+			cn, err := connPool.Get(c)
+			Expect(err).NotTo(HaveOccurred())
+			cns = append(cns, cn)
+		}
+
+		started := make(chan bool, 1)
+		done := make(chan bool, 1)
+		go func() {
+			defer GinkgoRecover()
+
+			started <- true
+			_, err := connPool.Get(c)
+			Expect(err).NotTo(HaveOccurred())
+			done <- true
+
+			connPool.Put(cn)
+		}()
+		<-started
+
+		// Check that Get is blocked.
+		select {
+		case <-done:
+			Fail("Get is not blocked")
+		case <-time.After(time.Millisecond):
+			// ok
+		}
+
+		connPool.Remove(cn, nil)
+
+		// Check that Get is unblocked.
+		select {
+		case <-done:
+			// ok
+		case <-time.After(time.Second):
+			Fail("Get is not unblocked")
+		}
+
+		for _, cn := range cns {
+			connPool.Put(cn)
+		}
+	})
+})
+
+var _ = Describe("MinIdleConns", func() {
+	c := context.Background()
+	const poolSize = 100
+	var minIdleConns int
+	var connPool *pool.ConnPool
+
+	newConnPool := func() *pool.ConnPool {
+		connPool := pool.NewConnPool(&pool.Options{
+			Dialer:             dummyDialer,
+			PoolSize:           poolSize,
+			MinIdleConns:       minIdleConns,
+			PoolTimeout:        100 * time.Millisecond,
+			IdleTimeout:        -1,
+			IdleCheckFrequency: -1,
+		})
+		Eventually(func() int {
+			return connPool.Len()
+		}).Should(Equal(minIdleConns))
+		return connPool
+	}
+
+	assert := func() {
+		It("has idle connections when created", func() {
+			Expect(connPool.Len()).To(Equal(minIdleConns))
+			Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+		})
+
+		Context("after Get", func() {
+			var cn *pool.Conn
+
+			BeforeEach(func() {
+				var err error
+				cn, err = connPool.Get(c)
+				Expect(err).NotTo(HaveOccurred())
+
+				Eventually(func() int {
+					return connPool.Len()
+				}).Should(Equal(minIdleConns + 1))
+			})
+
+			It("has idle connections", func() {
+				Expect(connPool.Len()).To(Equal(minIdleConns + 1))
+				Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+			})
+
+			Context("after Remove", func() {
+				BeforeEach(func() {
+					connPool.Remove(cn, nil)
+				})
+
+				It("has idle connections", func() {
+					Expect(connPool.Len()).To(Equal(minIdleConns))
+					Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+				})
+			})
+		})
+
+		Describe("Get does not exceed pool size", func() {
+			var mu sync.RWMutex
+			var cns []*pool.Conn
+
+			BeforeEach(func() {
+				cns = make([]*pool.Conn, 0)
+
+				perform(poolSize, func(_ int) {
+					defer GinkgoRecover()
+
+					cn, err := connPool.Get(c)
+					Expect(err).NotTo(HaveOccurred())
+					mu.Lock()
+					cns = append(cns, cn)
+					mu.Unlock()
+				})
+
+				Eventually(func() int {
+					return connPool.Len()
+				}).Should(BeNumerically(">=", poolSize))
+			})
+
+			It("Get is blocked", func() {
+				done := make(chan struct{})
+				go func() {
+					connPool.Get(c)
+					close(done)
+				}()
+
+				select {
+				case <-done:
+					Fail("Get is not blocked")
+				case <-time.After(time.Millisecond):
+					// ok
+				}
+
+				select {
+				case <-done:
+					// ok
+				case <-time.After(time.Second):
+					Fail("Get is not unblocked")
+				}
+			})
+
+			Context("after Put", func() {
+				BeforeEach(func() {
+					perform(len(cns), func(i int) {
+						mu.RLock()
+						connPool.Put(cns[i])
+						mu.RUnlock()
+					})
+
+					Eventually(func() int {
+						return connPool.Len()
+					}).Should(Equal(poolSize))
+				})
+
+				It("pool.Len is back to normal", func() {
+					Expect(connPool.Len()).To(Equal(poolSize))
+					Expect(connPool.IdleLen()).To(Equal(poolSize))
+				})
+			})
+
+			Context("after Remove", func() {
+				BeforeEach(func() {
+					perform(len(cns), func(i int) {
+						mu.RLock()
+						connPool.Remove(cns[i], nil)
+						mu.RUnlock()
+					})
+
+					Eventually(func() int {
+						return connPool.Len()
+					}).Should(Equal(minIdleConns))
+				})
+
+				It("has idle connections", func() {
+					Expect(connPool.Len()).To(Equal(minIdleConns))
+					Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+				})
+			})
+		})
+	}
+
+	Context("minIdleConns = 1", func() {
+		BeforeEach(func() {
+			minIdleConns = 1
+			connPool = newConnPool()
+		})
+
+		AfterEach(func() {
+			connPool.Close()
+		})
+
+		assert()
+	})
+
+	Context("minIdleConns = 32", func() {
+		BeforeEach(func() {
+			minIdleConns = 32
+			connPool = newConnPool()
+		})
+
+		AfterEach(func() {
+			connPool.Close()
+		})
+
+		assert()
+	})
+})
+
+var _ = Describe("conns reaper", func() {
+	c := context.Background()
+
+	const idleTimeout = time.Minute
+	const maxAge = time.Hour
+
+	var connPool *pool.ConnPool
+	var conns, staleConns, closedConns []*pool.Conn
+
+	assert := func(typ string) {
+		BeforeEach(func() {
+			closedConns = nil
+			connPool = pool.NewConnPool(&pool.Options{
+				Dialer:             dummyDialer,
+				PoolSize:           10,
+				IdleTimeout:        idleTimeout,
+				MaxConnAge:         maxAge,
+				PoolTimeout:        time.Second,
+				IdleCheckFrequency: time.Hour,
+				OnClose: func(cn *pool.Conn) error {
+					closedConns = append(closedConns, cn)
+					return nil
+				},
+			})
+
+			conns = nil
+
+			// add stale connections
+			staleConns = nil
+			for i := 0; i < 3; i++ {
+				cn, err := connPool.Get(c)
+				Expect(err).NotTo(HaveOccurred())
+				switch typ {
+				case "idle":
+					cn.SetUsedAt(time.Now().Add(-2 * idleTimeout))
+				case "aged":
+					cn.SetCreatedAt(time.Now().Add(-2 * maxAge))
+				}
+				conns = append(conns, cn)
+				staleConns = append(staleConns, cn)
+			}
+
+			// add fresh connections
+			for i := 0; i < 3; i++ {
+				cn, err := connPool.Get(c)
+				Expect(err).NotTo(HaveOccurred())
+				conns = append(conns, cn)
+			}
+
+			for _, cn := range conns {
+				connPool.Put(cn)
+			}
+
+			Expect(connPool.Len()).To(Equal(6))
+			Expect(connPool.IdleLen()).To(Equal(6))
+
+			n, err := connPool.ReapStaleConns()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(3))
+		})
+
+		AfterEach(func() {
+			_ = connPool.Close()
+			Expect(connPool.Len()).To(Equal(0))
+			Expect(connPool.IdleLen()).To(Equal(0))
+			Expect(len(closedConns)).To(Equal(len(conns)))
+			Expect(closedConns).To(ConsistOf(conns))
+		})
+
+		It("reaps stale connections", func() {
+			Expect(connPool.Len()).To(Equal(3))
+			Expect(connPool.IdleLen()).To(Equal(3))
+		})
+
+		It("does not reap fresh connections", func() {
+			n, err := connPool.ReapStaleConns()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(n).To(Equal(0))
+		})
+
+		It("stale connections are closed", func() {
+			Expect(len(closedConns)).To(Equal(len(staleConns)))
+			Expect(closedConns).To(ConsistOf(staleConns))
+		})
+
+		It("pool is functional", func() {
+			for j := 0; j < 3; j++ {
+				var freeCns []*pool.Conn
+				for i := 0; i < 3; i++ {
+					cn, err := connPool.Get(c)
+					Expect(err).NotTo(HaveOccurred())
+					Expect(cn).NotTo(BeNil())
+					freeCns = append(freeCns, cn)
+				}
+
+				Expect(connPool.Len()).To(Equal(3))
+				Expect(connPool.IdleLen()).To(Equal(0))
+
+				cn, err := connPool.Get(c)
+				Expect(err).NotTo(HaveOccurred())
+				Expect(cn).NotTo(BeNil())
+				conns = append(conns, cn)
+
+				Expect(connPool.Len()).To(Equal(4))
+				Expect(connPool.IdleLen()).To(Equal(0))
+
+				connPool.Remove(cn, nil)
+
+				Expect(connPool.Len()).To(Equal(3))
+				Expect(connPool.IdleLen()).To(Equal(0))
+
+				for _, cn := range freeCns {
+					connPool.Put(cn)
+				}
+
+				Expect(connPool.Len()).To(Equal(3))
+				Expect(connPool.IdleLen()).To(Equal(3))
+			}
+		})
+	}
+
+	assert("idle")
+	assert("aged")
+})
+
+var _ = Describe("race", func() {
+	c := context.Background()
+	var connPool *pool.ConnPool
+	var C, N int
+
+	BeforeEach(func() {
+		C, N = 10, 1000
+		if testing.Short() {
+			C = 4
+			N = 100
+		}
+	})
+
+	AfterEach(func() {
+		connPool.Close()
+	})
+
+	It("does not happen on Get, Put, and Remove", func() {
+		connPool = pool.NewConnPool(&pool.Options{
+			Dialer:             dummyDialer,
+			PoolSize:           10,
+			PoolTimeout:        time.Minute,
+			IdleTimeout:        time.Millisecond,
+			IdleCheckFrequency: time.Millisecond,
+		})
+
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				cn, err := connPool.Get(c)
+				Expect(err).NotTo(HaveOccurred())
+				if err == nil {
+					connPool.Put(cn)
+				}
+			}
+		}, func(id int) {
+			for i := 0; i < N; i++ {
+				cn, err := connPool.Get(c)
+				Expect(err).NotTo(HaveOccurred())
+				if err == nil {
+					connPool.Remove(cn, nil)
+				}
+			}
+		})
+	})
+})

+ 13 - 0
internal/proto/proto_test.go

@@ -0,0 +1,13 @@
+package proto_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "proto")
+}

+ 312 - 0
internal/proto/reader.go

@@ -0,0 +1,312 @@
+package proto
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+
+	"github.com/go-redis/redis/internal/util"
+)
+
+const (
+	ErrorReply  = '-'
+	StatusReply = '+'
+	IntReply    = ':'
+	StringReply = '$'
+	ArrayReply  = '*'
+)
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil")
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+//------------------------------------------------------------------------------
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+	rd   *bufio.Reader
+	_buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+	return &Reader{
+		rd:   bufio.NewReader(rd),
+		_buf: make([]byte, 64),
+	}
+}
+
+func (r *Reader) Buffered() int {
+	return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+	return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+	r.rd.Reset(rd)
+}
+
+func (r *Reader) ReadLine() ([]byte, error) {
+	line, err := r.readLine()
+	if err != nil {
+		return nil, err
+	}
+	if isNilReply(line) {
+		return nil, Nil
+	}
+	return line, nil
+}
+
+// readLine returns an error if:
+//   - there is a pending read error;
+//   - or line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+	b, err := r.rd.ReadSlice('\n')
+	if err != nil {
+		return nil, err
+	}
+	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+		return nil, fmt.Errorf("redis: invalid reply: %q", b)
+	}
+	b = b[:len(b)-2]
+	return b, nil
+}
+
+func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StatusReply:
+		return string(line[1:]), nil
+	case IntReply:
+		return util.ParseInt(line[1:], 10, 64)
+	case StringReply:
+		return r.readStringReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		if m == nil {
+			err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
+			return nil, err
+		}
+		return m(r, n)
+	}
+	return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) ReadIntReply() (int64, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case IntReply:
+		return util.ParseInt(line[1:], 10, 64)
+	default:
+		return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadString() (string, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return "", err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return "", ParseErrorReply(line)
+	case StringReply:
+		return r.readStringReply(line)
+	case StatusReply:
+		return string(line[1:]), nil
+	case IntReply:
+		return string(line[1:]), nil
+	default:
+		return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+	}
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+	if isNilReply(line) {
+		return "", Nil
+	}
+
+	replyLen, err := util.Atoi(line[1:])
+	if err != nil {
+		return "", err
+	}
+
+	b := make([]byte, replyLen+2)
+	_, err = io.ReadFull(r.rd, b)
+	if err != nil {
+		return "", err
+	}
+
+	return util.BytesToString(b[:replyLen]), nil
+}
+
+func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		return m(r, n)
+	default:
+		return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadArrayLen() (int64, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case ArrayReply:
+		return parseArrayLen(line)
+	default:
+		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadScanReply() ([]string, uint64, error) {
+	n, err := r.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+	if n != 2 {
+		return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+	}
+
+	cursor, err := r.ReadUint()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	n, err = r.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	keys := make([]string, n)
+	for i := int64(0); i < n; i++ {
+		key, err := r.ReadString()
+		if err != nil {
+			return nil, 0, err
+		}
+		keys[i] = key
+	}
+
+	return keys, cursor, err
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseUint(b, 10, 64)
+}
+
+func (r *Reader) ReadFloatReply() (float64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseFloat(b, 64)
+}
+
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StringReply:
+		return r._readTmpBytesReply(line)
+	case StatusReply:
+		return line[1:], nil
+	default:
+		return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+	}
+}
+
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+	if isNilReply(line) {
+		return nil, Nil
+	}
+
+	replyLen, err := util.Atoi(line[1:])
+	if err != nil {
+		return nil, err
+	}
+
+	buf := r.buf(replyLen + 2)
+	_, err = io.ReadFull(r.rd, buf)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf[:replyLen], nil
+}
+
+func (r *Reader) buf(n int) []byte {
+	if n <= cap(r._buf) {
+		return r._buf[:n]
+	}
+	d := n - cap(r._buf)
+	r._buf = append(r._buf, make([]byte, d)...)
+	return r._buf
+}
+
+func isNilReply(b []byte) bool {
+	return len(b) == 3 &&
+		(b[0] == StringReply || b[0] == ArrayReply) &&
+		b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+	return RedisError(string(line[1:]))
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+	if isNilReply(line) {
+		return 0, Nil
+	}
+	return util.ParseInt(line[1:], 10, 64)
+}

+ 56 - 0
internal/proto/reader_test.go

@@ -0,0 +1,56 @@
+package proto_test
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/go-redis/redis/v7/internal/proto"
+)
+
+func BenchmarkReader_ParseReply_Status(b *testing.B) {
+	benchmarkParseReply(b, "+OK\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Int(b *testing.B) {
+	benchmarkParseReply(b, ":1\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Error(b *testing.B) {
+	benchmarkParseReply(b, "-Error message\r\n", nil, true)
+}
+
+func BenchmarkReader_ParseReply_String(b *testing.B) {
+	benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Slice(b *testing.B) {
+	benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", multiBulkParse, false)
+}
+
+func benchmarkParseReply(b *testing.B, reply string, m proto.MultiBulkParse, wanterr bool) {
+	buf := new(bytes.Buffer)
+	for i := 0; i < b.N; i++ {
+		buf.WriteString(reply)
+	}
+	p := proto.NewReader(buf)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		_, err := p.ReadReply(m)
+		if !wanterr && err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func multiBulkParse(p *proto.Reader, n int64) (interface{}, error) {
+	vv := make([]interface{}, 0, n)
+	for i := int64(0); i < n; i++ {
+		v, err := p.ReadReply(multiBulkParse)
+		if err != nil {
+			return nil, err
+		}
+		vv = append(vv, v)
+	}
+	return vv, nil
+}

+ 166 - 0
internal/proto/scan.go

@@ -0,0 +1,166 @@
+package proto
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+
+	"github.com/go-redis/redis/internal/util"
+)
+
+func Scan(b []byte, v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return fmt.Errorf("redis: Scan(nil)")
+	case *string:
+		*v = util.BytesToString(b)
+		return nil
+	case *[]byte:
+		*v = b
+		return nil
+	case *int:
+		var err error
+		*v, err = util.Atoi(b)
+		return err
+	case *int8:
+		n, err := util.ParseInt(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = int8(n)
+		return nil
+	case *int16:
+		n, err := util.ParseInt(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = int16(n)
+		return nil
+	case *int32:
+		n, err := util.ParseInt(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = int32(n)
+		return nil
+	case *int64:
+		n, err := util.ParseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *uint:
+		n, err := util.ParseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = uint(n)
+		return nil
+	case *uint8:
+		n, err := util.ParseUint(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = uint8(n)
+		return nil
+	case *uint16:
+		n, err := util.ParseUint(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = uint16(n)
+		return nil
+	case *uint32:
+		n, err := util.ParseUint(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = uint32(n)
+		return nil
+	case *uint64:
+		n, err := util.ParseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *float32:
+		n, err := util.ParseFloat(b, 32)
+		if err != nil {
+			return err
+		}
+		*v = float32(n)
+		return err
+	case *float64:
+		var err error
+		*v, err = util.ParseFloat(b, 64)
+		return err
+	case *bool:
+		*v = len(b) == 1 && b[0] == '1'
+		return nil
+	case encoding.BinaryUnmarshaler:
+		return v.UnmarshalBinary(b)
+	default:
+		return fmt.Errorf(
+			"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+	}
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+	v := reflect.ValueOf(slice)
+	if !v.IsValid() {
+		return fmt.Errorf("redis: ScanSlice(nil)")
+	}
+	if v.Kind() != reflect.Ptr {
+		return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Slice {
+		return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+	}
+
+	next := makeSliceNextElemFunc(v)
+	for i, s := range data {
+		elem := next()
+		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+	elemType := v.Type().Elem()
+
+	if elemType.Kind() == reflect.Ptr {
+		elemType = elemType.Elem()
+		return func() reflect.Value {
+			if v.Len() < v.Cap() {
+				v.Set(v.Slice(0, v.Len()+1))
+				elem := v.Index(v.Len() - 1)
+				if elem.IsNil() {
+					elem.Set(reflect.New(elemType))
+				}
+				return elem.Elem()
+			}
+
+			elem := reflect.New(elemType)
+			v.Set(reflect.Append(v, elem))
+			return elem.Elem()
+		}
+	}
+
+	zero := reflect.Zero(elemType)
+	return func() reflect.Value {
+		if v.Len() < v.Cap() {
+			v.Set(v.Slice(0, v.Len()+1))
+			return v.Index(v.Len() - 1)
+		}
+
+		v.Set(reflect.Append(v, zero))
+		return v.Index(v.Len() - 1)
+	}
+}
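
A tiny sketch of Scan with plain destinations, complementing the ScanSlice test that follows; the /v7/ import path is assumed:

package proto_test

import (
	"fmt"

	"github.com/go-redis/redis/v7/internal/proto"
)

func ExampleScan() {
	var n int
	var ok bool

	// Scan parses raw reply bytes into a supported Go type, or into any
	// value implementing encoding.BinaryUnmarshaler.
	_ = proto.Scan([]byte("42"), &n)
	_ = proto.Scan([]byte("1"), &ok)

	fmt.Println(n, ok)
	// Output: 42 true
}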

+ 48 - 0
internal/proto/scan_test.go

@@ -0,0 +1,48 @@
+package proto
+
+import (
+	"encoding/json"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+type testScanSliceStruct struct {
+	ID   int
+	Name string
+}
+
+func (s *testScanSliceStruct) MarshalBinary() ([]byte, error) {
+	return json.Marshal(s)
+}
+
+func (s *testScanSliceStruct) UnmarshalBinary(b []byte) error {
+	return json.Unmarshal(b, s)
+}
+
+var _ = Describe("ScanSlice", func() {
+	data := []string{
+		`{"ID":-1,"Name":"Back Yu"}`,
+		`{"ID":1,"Name":"szyhf"}`,
+	}
+
+	It("[]testScanSliceStruct", func() {
+		var slice []testScanSliceStruct
+		err := ScanSlice(data, &slice)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(slice).To(Equal([]testScanSliceStruct{
+			{-1, "Back Yu"},
+			{1, "szyhf"},
+		}))
+	})
+
+	It("var testContainer []*testScanSliceStruct", func() {
+		var slice []*testScanSliceStruct
+		err := ScanSlice(data, &slice)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(slice).To(Equal([]*testScanSliceStruct{
+			{-1, "Back Yu"},
+			{1, "szyhf"},
+		}))
+	})
+})

+ 93 - 0
internal/proto/write_buffer_test.go

@@ -0,0 +1,93 @@
+package proto_test
+
+import (
+	"bytes"
+	"encoding"
+	"io/ioutil"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redis/v7/internal/proto"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+type MyType struct{}
+
+var _ encoding.BinaryMarshaler = (*MyType)(nil)
+
+func (t *MyType) MarshalBinary() ([]byte, error) {
+	return []byte("hello"), nil
+}
+
+var _ = Describe("WriteBuffer", func() {
+	var buf *bytes.Buffer
+	var wr *proto.Writer
+
+	BeforeEach(func() {
+		buf = new(bytes.Buffer)
+		wr = proto.NewWriter(buf)
+	})
+
+	It("should write args", func() {
+		err := wr.WriteArgs([]interface{}{
+			"string",
+			12,
+			34.56,
+			[]byte{'b', 'y', 't', 'e', 's'},
+			true,
+			nil,
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		err = wr.Flush()
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(buf.Bytes()).To(Equal([]byte("*6\r\n" +
+			"$6\r\nstring\r\n" +
+			"$2\r\n12\r\n" +
+			"$5\r\n34.56\r\n" +
+			"$5\r\nbytes\r\n" +
+			"$1\r\n1\r\n" +
+			"$0\r\n" +
+			"\r\n")))
+	})
+
+	It("should append time", func() {
+		err := wr.WriteArgs([]interface{}{time.Unix(1414141414, 0).UTC()})
+		Expect(err).NotTo(HaveOccurred())
+
+		err = wr.Flush()
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(buf.Len()).To(Equal(31))
+	})
+
+	It("should append marshalable args", func() {
+		err := wr.WriteArgs([]interface{}{&MyType{}})
+		Expect(err).NotTo(HaveOccurred())
+
+		err = wr.Flush()
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(buf.Len()).To(Equal(15))
+	})
+})
+
+func BenchmarkWriteBuffer_Append(b *testing.B) {
+	buf := proto.NewWriter(ioutil.Discard)
+	args := []interface{}{"hello", "world", "foo", "bar"}
+
+	for i := 0; i < b.N; i++ {
+		err := buf.WriteArgs(args)
+		if err != nil {
+			panic(err)
+		}
+
+		err = buf.Flush()
+		if err != nil {
+			panic(err)
+		}
+	}
+}

+ 165 - 0
internal/proto/writer.go

@@ -0,0 +1,165 @@
+package proto
+
+import (
+	"bufio"
+	"encoding"
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/go-redis/redis/internal/util"
+)
+
+type Writer struct {
+	wr *bufio.Writer
+
+	lenBuf []byte
+	numBuf []byte
+}
+
+func NewWriter(wr io.Writer) *Writer {
+	return &Writer{
+		wr: bufio.NewWriter(wr),
+
+		lenBuf: make([]byte, 64),
+		numBuf: make([]byte, 64),
+	}
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+	err := w.wr.WriteByte(ArrayReply)
+	if err != nil {
+		return err
+	}
+
+	err = w.writeLen(len(args))
+	if err != nil {
+		return err
+	}
+
+	for _, arg := range args {
+		err := w.writeArg(arg)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+	w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+	w.lenBuf = append(w.lenBuf, '\r', '\n')
+	_, err := w.wr.Write(w.lenBuf)
+	return err
+}
+
+func (w *Writer) writeArg(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return w.string("")
+	case string:
+		return w.string(v)
+	case []byte:
+		return w.bytes(v)
+	case int:
+		return w.int(int64(v))
+	case int8:
+		return w.int(int64(v))
+	case int16:
+		return w.int(int64(v))
+	case int32:
+		return w.int(int64(v))
+	case int64:
+		return w.int(v)
+	case uint:
+		return w.uint(uint64(v))
+	case uint8:
+		return w.uint(uint64(v))
+	case uint16:
+		return w.uint(uint64(v))
+	case uint32:
+		return w.uint(uint64(v))
+	case uint64:
+		return w.uint(v)
+	case float32:
+		return w.float(float64(v))
+	case float64:
+		return w.float(v)
+	case bool:
+		if v {
+			return w.int(1)
+		}
+		return w.int(0)
+	case time.Time:
+		return w.string(v.Format(time.RFC3339))
+	case encoding.BinaryMarshaler:
+		b, err := v.MarshalBinary()
+		if err != nil {
+			return err
+		}
+		return w.bytes(b)
+	default:
+		return fmt.Errorf(
+			"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+	}
+}
+
+func (w *Writer) bytes(b []byte) error {
+	err := w.wr.WriteByte(StringReply)
+	if err != nil {
+		return err
+	}
+
+	err = w.writeLen(len(b))
+	if err != nil {
+		return err
+	}
+
+	_, err = w.wr.Write(b)
+	if err != nil {
+		return err
+	}
+
+	return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+	return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+	w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+	w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+	w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+	err := w.wr.WriteByte('\r')
+	if err != nil {
+		return err
+	}
+	return w.wr.WriteByte('\n')
+}
+
+func (w *Writer) Buffered() int {
+	return w.wr.Buffered()
+}
+
+func (w *Writer) Reset(wr io.Writer) {
+	w.wr.Reset(wr)
+}
+
+func (w *Writer) Flush() error {
+	return w.wr.Flush()
+}

+ 56 - 0
internal/util.go

@@ -0,0 +1,56 @@
+package internal
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-redis/redis/internal/util"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func ToLower(s string) string {
+	if isLower(s) {
+		return s
+	}
+
+	b := make([]byte, len(s))
+	for i := range b {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			c += 'a' - 'A'
+		}
+		b[i] = c
+	}
+	return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			return false
+		}
+	}
+	return true
+}
+
+func Unwrap(err error) error {
+	u, ok := err.(interface {
+		Unwrap() error
+	})
+	if !ok {
+		return nil
+	}
+	return u.Unwrap()
+}
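
Editor's note: Sleep above is a context-aware replacement for time.Sleep that returns early with ctx.Err() when the context is cancelled or its deadline passes. A self-contained sketch of the same pattern (the helper is reproduced locally because internal packages cannot be imported from outside the module):

package main

import (
	"context"
	"fmt"
	"time"
)

// sleep mirrors internal.Sleep: it waits for dur, but returns early with
// ctx.Err() if the context is cancelled or its deadline passes first.
func sleep(ctx context.Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()

	select {
	case <-t.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The one-second sleep is cut short by the 50ms context deadline.
	err := sleep(ctx, time.Second)
	fmt.Println(err) // context deadline exceeded
}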

+ 11 - 0
internal/util/safe.go

@@ -0,0 +1,11 @@
+// +build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+	return string(b)
+}
+
+func StringToBytes(s string) []byte {
+	return []byte(s)
+}

+ 19 - 0
internal/util/strconv.go

@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+	return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+	return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+	return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+	return strconv.ParseFloat(BytesToString(b), bitSize)
+}

+ 22 - 0
internal/util/unsafe.go

@@ -0,0 +1,22 @@
+// +build !appengine
+
+package util
+
+import (
+	"unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(
+		&struct {
+			string
+			Cap int
+		}{s, len(s)},
+	))
+}
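
Editor's note: these zero-copy conversions avoid an allocation by reinterpreting existing memory, so callers must guarantee the source is never mutated afterwards (as ToLower in internal/util.go does with a freshly built slice). A small sketch of the same trick and its contract, with the conversion reproduced locally since the package is internal:

package main

import (
	"fmt"
	"unsafe"
)

// bytesToString mirrors util.BytesToString: it reinterprets the slice's
// backing array as string data without copying. The caller must guarantee
// that the slice is never mutated afterwards, otherwise the supposedly
// immutable string would change underneath other code.
func bytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	b := []byte("hello")
	s := bytesToString(b) // no allocation, shares b's memory
	fmt.Println(s)        // hello
	// From this point on, b must be treated as read-only.
}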

+ 65 - 0
internal_test.go

@@ -0,0 +1,65 @@
+package redis
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("newClusterState", func() {
+	var state *clusterState
+
+	createClusterState := func(slots []ClusterSlot) *clusterState {
+		nodes := newClusterNodes(&ClusterOptions{})
+		state, err := newClusterState(nodes, slots, "10.10.10.10:1234")
+		Expect(err).NotTo(HaveOccurred())
+		return state
+	}
+
+	Describe("sorting", func() {
+		BeforeEach(func() {
+			state = createClusterState([]ClusterSlot{{
+				Start: 1000,
+				End:   1999,
+			}, {
+				Start: 0,
+				End:   999,
+			}, {
+				Start: 2000,
+				End:   2999,
+			}})
+		})
+
+		It("sorts slots", func() {
+			Expect(state.slots).To(Equal([]*clusterSlot{
+				{start: 0, end: 999, nodes: nil},
+				{start: 1000, end: 1999, nodes: nil},
+				{start: 2000, end: 2999, nodes: nil},
+			}))
+		})
+	})
+
+	Describe("loopback", func() {
+		BeforeEach(func() {
+			state = createClusterState([]ClusterSlot{{
+				Nodes: []ClusterNode{{Addr: "127.0.0.1:7001"}},
+			}, {
+				Nodes: []ClusterNode{{Addr: "127.0.0.1:7002"}},
+			}, {
+				Nodes: []ClusterNode{{Addr: "1.2.3.4:1234"}},
+			}, {
+				Nodes: []ClusterNode{{Addr: ":1234"}},
+			}})
+		})
+
+		It("replaces loopback hosts in addresses", func() {
+			slotAddr := func(slot *clusterSlot) string {
+				return slot.nodes[0].Client.Options().Addr
+			}
+
+			Expect(slotAddr(state.slots[0])).To(Equal("10.10.10.10:7001"))
+			Expect(slotAddr(state.slots[1])).To(Equal("10.10.10.10:7002"))
+			Expect(slotAddr(state.slots[2])).To(Equal("1.2.3.4:1234"))
+			Expect(slotAddr(state.slots[3])).To(Equal(":1234"))
+		})
+	})
+})

+ 75 - 0
iterator.go

@@ -0,0 +1,75 @@
+package redis
+
+import (
+	"sync"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+	mu  sync.Mutex // protects cmd and pos
+	cmd *ScanCmd
+	pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+	it.mu.Lock()
+	err := it.cmd.Err()
+	it.mu.Unlock()
+	return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next() bool {
+	it.mu.Lock()
+	defer it.mu.Unlock()
+
+	// Instantly return on errors.
+	if it.cmd.Err() != nil {
+		return false
+	}
+
+	// Advance cursor, check if we are still within range.
+	if it.pos < len(it.cmd.page) {
+		it.pos++
+		return true
+	}
+
+	for {
+		// Return if there is no more data to fetch.
+		if it.cmd.cursor == 0 {
+			return false
+		}
+
+		// Fetch next page.
+		if it.cmd.args[0] == "scan" {
+			it.cmd.args[1] = it.cmd.cursor
+		} else {
+			it.cmd.args[2] = it.cmd.cursor
+		}
+
+		err := it.cmd.process(it.cmd)
+		if err != nil {
+			return false
+		}
+
+		it.pos = 1
+
+		// Redis can occasionally return an empty page.
+		if len(it.cmd.page) > 0 {
+			return true
+		}
+	}
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+	var v string
+	it.mu.Lock()
+	if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+		v = it.cmd.page[it.pos-1]
+	}
+	it.mu.Unlock()
+	return v
+}
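
Editor's note: ScanIterator hides cursor management behind a simple Next/Val loop; the tests that follow exercise it in depth, but the basic usage pattern against the v7 API (assuming a reachable server and a hypothetical "user:*" key pattern) looks like this:

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Iterate over all keys matching "user:*", fetching 10 per SCAN page.
	iter := client.Scan(0, "user:*", 10).Iterator()
	for iter.Next() {
		fmt.Println("key:", iter.Val())
	}
	if err := iter.Err(); err != nil {
		log.Fatal(err)
	}
}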

+ 136 - 0
iterator_test.go

@@ -0,0 +1,136 @@
+package redis_test
+
+import (
+	"fmt"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("ScanIterator", func() {
+	var client *redis.Client
+
+	var seed = func(n int) error {
+		pipe := client.Pipeline()
+		for i := 1; i <= n; i++ {
+			pipe.Set(fmt.Sprintf("K%02d", i), "x", 0).Err()
+		}
+		_, err := pipe.Exec()
+		return err
+	}
+
+	var extraSeed = func(n int, m int) error {
+		pipe := client.Pipeline()
+		for i := 1; i <= m; i++ {
+			pipe.Set(fmt.Sprintf("A%02d", i), "x", 0).Err()
+		}
+		for i := 1; i <= n; i++ {
+			pipe.Set(fmt.Sprintf("K%02d", i), "x", 0).Err()
+		}
+		_, err := pipe.Exec()
+		return err
+	}
+
+	var hashKey = "K_HASHTEST"
+	var hashSeed = func(n int) error {
+		pipe := client.Pipeline()
+		for i := 1; i <= n; i++ {
+			pipe.HSet(hashKey, fmt.Sprintf("K%02d", i), "x").Err()
+		}
+		_, err := pipe.Exec()
+		return err
+	}
+
+	BeforeEach(func() {
+		client = redis.NewClient(redisOptions())
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("should scan across empty DBs", func() {
+		iter := client.Scan(0, "", 10).Iterator()
+		Expect(iter.Next()).To(BeFalse())
+		Expect(iter.Err()).NotTo(HaveOccurred())
+	})
+
+	It("should scan across one page", func() {
+		Expect(seed(7)).NotTo(HaveOccurred())
+
+		var vals []string
+		iter := client.Scan(0, "", 0).Iterator()
+		for iter.Next() {
+			vals = append(vals, iter.Val())
+		}
+		Expect(iter.Err()).NotTo(HaveOccurred())
+		Expect(vals).To(ConsistOf([]string{"K01", "K02", "K03", "K04", "K05", "K06", "K07"}))
+	})
+
+	It("should scan across multiple pages", func() {
+		Expect(seed(71)).NotTo(HaveOccurred())
+
+		var vals []string
+		iter := client.Scan(0, "", 10).Iterator()
+		for iter.Next() {
+			vals = append(vals, iter.Val())
+		}
+		Expect(iter.Err()).NotTo(HaveOccurred())
+		Expect(vals).To(HaveLen(71))
+		Expect(vals).To(ContainElement("K01"))
+		Expect(vals).To(ContainElement("K71"))
+	})
+
+	It("should hscan across multiple pages", func() {
+		Expect(hashSeed(71)).NotTo(HaveOccurred())
+
+		var vals []string
+		iter := client.HScan(hashKey, 0, "", 10).Iterator()
+		for iter.Next() {
+			vals = append(vals, iter.Val())
+		}
+		Expect(iter.Err()).NotTo(HaveOccurred())
+		Expect(vals).To(HaveLen(71 * 2))
+		Expect(vals).To(ContainElement("K01"))
+		Expect(vals).To(ContainElement("K71"))
+	})
+
+	It("should scan to page borders", func() {
+		Expect(seed(20)).NotTo(HaveOccurred())
+
+		var vals []string
+		iter := client.Scan(0, "", 10).Iterator()
+		for iter.Next() {
+			vals = append(vals, iter.Val())
+		}
+		Expect(iter.Err()).NotTo(HaveOccurred())
+		Expect(vals).To(HaveLen(20))
+	})
+
+	It("should scan with match", func() {
+		Expect(seed(33)).NotTo(HaveOccurred())
+
+		var vals []string
+		iter := client.Scan(0, "K*2*", 10).Iterator()
+		for iter.Next() {
+			vals = append(vals, iter.Val())
+		}
+		Expect(iter.Err()).NotTo(HaveOccurred())
+		Expect(vals).To(HaveLen(13))
+	})
+
+	It("should scan with match across empty pages", func() {
+		Expect(extraSeed(2, 10)).NotTo(HaveOccurred())
+
+		var vals []string
+		iter := client.Scan(0, "K*", 1).Iterator()
+		for iter.Next() {
+			vals = append(vals, iter.Val())
+		}
+		Expect(iter.Err()).NotTo(HaveOccurred())
+		Expect(vals).To(HaveLen(2))
+	})
+})

+ 372 - 0
main_test.go

@@ -0,0 +1,372 @@
+package redis_test
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+const (
+	redisPort          = "6380"
+	redisAddr          = ":" + redisPort
+	redisSecondaryPort = "6381"
+)
+
+const (
+	ringShard1Port = "6390"
+	ringShard2Port = "6391"
+	ringShard3Port = "6392"
+)
+
+const (
+	sentinelName       = "mymaster"
+	sentinelMasterPort = "8123"
+	sentinelSlave1Port = "8124"
+	sentinelSlave2Port = "8125"
+	sentinelPort       = "8126"
+)
+
+var (
+	redisMain                                                *redisProcess
+	ringShard1, ringShard2, ringShard3                       *redisProcess
+	sentinelMaster, sentinelSlave1, sentinelSlave2, sentinel *redisProcess
+)
+
+var cluster = &clusterScenario{
+	ports:     []string{"8220", "8221", "8222", "8223", "8224", "8225"},
+	nodeIDs:   make([]string, 6),
+	processes: make(map[string]*redisProcess, 6),
+	clients:   make(map[string]*redis.Client, 6),
+}
+
+var _ = BeforeSuite(func() {
+	var err error
+
+	redisMain, err = startRedis(redisPort)
+	Expect(err).NotTo(HaveOccurred())
+
+	ringShard1, err = startRedis(ringShard1Port)
+	Expect(err).NotTo(HaveOccurred())
+
+	ringShard2, err = startRedis(ringShard2Port)
+	Expect(err).NotTo(HaveOccurred())
+
+	ringShard3, err = startRedis(ringShard3Port)
+	Expect(err).NotTo(HaveOccurred())
+
+	sentinelMaster, err = startRedis(sentinelMasterPort)
+	Expect(err).NotTo(HaveOccurred())
+
+	sentinel, err = startSentinel(sentinelPort, sentinelName, sentinelMasterPort)
+	Expect(err).NotTo(HaveOccurred())
+
+	sentinelSlave1, err = startRedis(
+		sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+	Expect(err).NotTo(HaveOccurred())
+
+	sentinelSlave2, err = startRedis(
+		sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+	Expect(err).NotTo(HaveOccurred())
+
+	Expect(startCluster(cluster)).NotTo(HaveOccurred())
+})
+
+var _ = AfterSuite(func() {
+	Expect(redisMain.Close()).NotTo(HaveOccurred())
+
+	Expect(ringShard1.Close()).NotTo(HaveOccurred())
+	Expect(ringShard2.Close()).NotTo(HaveOccurred())
+	Expect(ringShard3.Close()).NotTo(HaveOccurred())
+
+	Expect(sentinel.Close()).NotTo(HaveOccurred())
+	Expect(sentinelSlave1.Close()).NotTo(HaveOccurred())
+	Expect(sentinelSlave2.Close()).NotTo(HaveOccurred())
+	Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
+
+	Expect(stopCluster(cluster)).NotTo(HaveOccurred())
+})
+
+func TestGinkgoSuite(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "go-redis")
+}
+
+//------------------------------------------------------------------------------
+
+func redisOptions() *redis.Options {
+	return &redis.Options{
+		Addr:               redisAddr,
+		DB:                 15,
+		DialTimeout:        10 * time.Second,
+		ReadTimeout:        30 * time.Second,
+		WriteTimeout:       30 * time.Second,
+		PoolSize:           10,
+		PoolTimeout:        30 * time.Second,
+		IdleTimeout:        time.Minute,
+		IdleCheckFrequency: 100 * time.Millisecond,
+	}
+}
+
+func redisClusterOptions() *redis.ClusterOptions {
+	return &redis.ClusterOptions{
+		DialTimeout:        10 * time.Second,
+		ReadTimeout:        30 * time.Second,
+		WriteTimeout:       30 * time.Second,
+		PoolSize:           10,
+		PoolTimeout:        30 * time.Second,
+		IdleTimeout:        time.Minute,
+		IdleCheckFrequency: 100 * time.Millisecond,
+	}
+}
+
+func redisRingOptions() *redis.RingOptions {
+	return &redis.RingOptions{
+		Addrs: map[string]string{
+			"ringShardOne": ":" + ringShard1Port,
+			"ringShardTwo": ":" + ringShard2Port,
+		},
+		DialTimeout:        10 * time.Second,
+		ReadTimeout:        30 * time.Second,
+		WriteTimeout:       30 * time.Second,
+		PoolSize:           10,
+		PoolTimeout:        30 * time.Second,
+		IdleTimeout:        time.Minute,
+		IdleCheckFrequency: 100 * time.Millisecond,
+	}
+}
+
+func performAsync(n int, cbs ...func(int)) *sync.WaitGroup {
+	var wg sync.WaitGroup
+	for _, cb := range cbs {
+		for i := 0; i < n; i++ {
+			wg.Add(1)
+			go func(cb func(int), i int) {
+				defer GinkgoRecover()
+				defer wg.Done()
+
+				cb(i)
+			}(cb, i)
+		}
+	}
+	return &wg
+}
+
+func perform(n int, cbs ...func(int)) {
+	wg := performAsync(n, cbs...)
+	wg.Wait()
+}
+
+func eventually(fn func() error, timeout time.Duration) error {
+	errCh := make(chan error, 1)
+	done := make(chan struct{})
+	exit := make(chan struct{})
+
+	go func() {
+		for {
+			err := fn()
+			if err == nil {
+				close(done)
+				return
+			}
+
+			select {
+			case errCh <- err:
+			default:
+			}
+
+			select {
+			case <-exit:
+				return
+			case <-time.After(timeout / 100):
+			}
+		}
+	}()
+
+	select {
+	case <-done:
+		return nil
+	case <-time.After(timeout):
+		close(exit)
+		select {
+		case err := <-errCh:
+			return err
+		default:
+			return fmt.Errorf("timeout after %s without an error", timeout)
+		}
+	}
+}
+
+func execCmd(name string, args ...string) (*os.Process, error) {
+	cmd := exec.Command(name, args...)
+	if testing.Verbose() {
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+	}
+	return cmd.Process, cmd.Start()
+}
+
+func connectTo(port string) (*redis.Client, error) {
+	client := redis.NewClient(&redis.Options{
+		Addr: ":" + port,
+	})
+
+	err := eventually(func() error {
+		return client.Ping().Err()
+	}, 30*time.Second)
+	if err != nil {
+		return nil, err
+	}
+
+	return client, nil
+}
+
+type redisProcess struct {
+	*os.Process
+	*redis.Client
+}
+
+func (p *redisProcess) Close() error {
+	if err := p.Kill(); err != nil {
+		return err
+	}
+
+	err := eventually(func() error {
+		if err := p.Client.Ping().Err(); err != nil {
+			return nil
+		}
+		return errors.New("client is not shutdown")
+	}, 10*time.Second)
+	if err != nil {
+		return err
+	}
+
+	p.Client.Close()
+	return nil
+}
+
+var (
+	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+
+func redisDir(port string) (string, error) {
+	dir, err := filepath.Abs(filepath.Join("testdata", "instances", port))
+	if err != nil {
+		return "", err
+	}
+	if err := os.RemoveAll(dir); err != nil {
+		return "", err
+	}
+	if err := os.MkdirAll(dir, 0775); err != nil {
+		return "", err
+	}
+	return dir, nil
+}
+
+func startRedis(port string, args ...string) (*redisProcess, error) {
+	dir, err := redisDir(port)
+	if err != nil {
+		return nil, err
+	}
+	if err = exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil {
+		return nil, err
+	}
+
+	baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir}
+	process, err := execCmd(redisServerBin, append(baseArgs, args...)...)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := connectTo(port)
+	if err != nil {
+		process.Kill()
+		return nil, err
+	}
+	return &redisProcess{process, client}, err
+}
+
+func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
+	dir, err := redisDir(port)
+	if err != nil {
+		return nil, err
+	}
+	process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir)
+	if err != nil {
+		return nil, err
+	}
+	client, err := connectTo(port)
+	if err != nil {
+		process.Kill()
+		return nil, err
+	}
+	for _, cmd := range []*redis.StatusCmd{
+		redis.NewStatusCmd("SENTINEL", "MONITOR", masterName, "127.0.0.1", masterPort, "1"),
+		redis.NewStatusCmd("SENTINEL", "SET", masterName, "down-after-milliseconds", "500"),
+		redis.NewStatusCmd("SENTINEL", "SET", masterName, "failover-timeout", "1000"),
+		redis.NewStatusCmd("SENTINEL", "SET", masterName, "parallel-syncs", "1"),
+	} {
+		client.Process(cmd)
+		if err := cmd.Err(); err != nil {
+			process.Kill()
+			return nil, err
+		}
+	}
+	return &redisProcess{process, client}, nil
+}
+
+//------------------------------------------------------------------------------
+
+type badConnError string
+
+func (e badConnError) Error() string   { return string(e) }
+func (e badConnError) Timeout() bool   { return false }
+func (e badConnError) Temporary() bool { return false }
+
+type badConn struct {
+	net.TCPConn
+
+	readDelay, writeDelay time.Duration
+	readErr, writeErr     error
+}
+
+var _ net.Conn = &badConn{}
+
+func (cn *badConn) SetReadDeadline(t time.Time) error {
+	return nil
+}
+
+func (cn *badConn) SetWriteDeadline(t time.Time) error {
+	return nil
+}
+
+func (cn *badConn) Read([]byte) (int, error) {
+	if cn.readDelay != 0 {
+		time.Sleep(cn.readDelay)
+	}
+	if cn.readErr != nil {
+		return 0, cn.readErr
+	}
+	return 0, badConnError("bad connection")
+}
+
+func (cn *badConn) Write([]byte) (int, error) {
+	if cn.writeDelay != 0 {
+		time.Sleep(cn.writeDelay)
+	}
+	if cn.writeErr != nil {
+		return 0, cn.writeErr
+	}
+	return 0, badConnError("bad connection")
+}

+ 243 - 0
options.go

@@ -0,0 +1,243 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-redis/redis/internal/pool"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+	// Allow returns nil if the operation is allowed or an error otherwise.
+	// If the operation is allowed, the client must call ReportResult with the
+	// result of the operation, whether it is a success or a failure.
+	Allow() error
+	// ReportResult reports the result of the previously allowed operation.
+	// nil indicates a success, non-nil error usually indicates a failure.
+	ReportResult(result error)
+}
+
+type Options struct {
+	// The network type, either tcp or unix.
+	// Default is tcp.
+	Network string
+	// host:port address.
+	Addr string
+
+	// Dialer creates a new network connection and has priority over the
+	// Network and Addr options.
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	// Hook that is called when a new connection is established.
+	OnConnect func(*Conn) error
+
+	// Optional password. Must match the password specified in the
+	// requirepass server configuration option.
+	Password string
+	// Database to be selected after connecting to the server.
+	DB int
+
+	// Maximum number of retries before giving up.
+	// Default is to not retry failed commands.
+	MaxRetries int
+	// Minimum backoff between each retry.
+	// Default is 8 milliseconds; -1 disables backoff.
+	MinRetryBackoff time.Duration
+	// Maximum backoff between each retry.
+	// Default is 512 milliseconds; -1 disables backoff.
+	MaxRetryBackoff time.Duration
+
+	// Dial timeout for establishing new connections.
+	// Default is 5 seconds.
+	DialTimeout time.Duration
+	// Timeout for socket reads. If reached, commands will fail
+	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+	// Default is 3 seconds.
+	ReadTimeout time.Duration
+	// Timeout for socket writes. If reached, commands will fail
+	// with a timeout instead of blocking.
+	// Default is ReadTimeout.
+	WriteTimeout time.Duration
+
+	// Maximum number of socket connections.
+	// Default is 10 connections per every CPU as reported by runtime.NumCPU.
+	PoolSize int
+	// Minimum number of idle connections, which is useful when establishing
+	// a new connection is slow.
+	MinIdleConns int
+	// Connection age at which client retires (closes) the connection.
+	// Default is to not close aged connections.
+	MaxConnAge time.Duration
+	// Amount of time client waits for connection if all connections
+	// are busy before returning an error.
+	// Default is ReadTimeout + 1 second.
+	PoolTimeout time.Duration
+	// Amount of time after which client closes idle connections.
+	// Should be less than server's timeout.
+	// Default is 5 minutes. -1 disables idle timeout check.
+	IdleTimeout time.Duration
+	// Frequency of idle checks made by idle connections reaper.
+	// Default is 1 minute. -1 disables idle connections reaper,
+	// but idle connections are still discarded by the client
+	// if IdleTimeout is set.
+	IdleCheckFrequency time.Duration
+
+	// Enables read only queries on slave nodes.
+	readOnly bool
+
+	// TLS Config to use. When set TLS will be negotiated.
+	TLSConfig *tls.Config
+
+	// Limiter is an interface used to implement a circuit breaker or a rate limiter.
+	Limiter Limiter
+}
+
+func (opt *Options) init() {
+	if opt.Addr == "" {
+		opt.Addr = "localhost:6379"
+	}
+	if opt.Network == "" {
+		if strings.HasPrefix(opt.Addr, "/") {
+			opt.Network = "unix"
+		} else {
+			opt.Network = "tcp"
+		}
+	}
+	if opt.Dialer == nil {
+		opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
+			netDialer := &net.Dialer{
+				Timeout:   opt.DialTimeout,
+				KeepAlive: 5 * time.Minute,
+			}
+			if opt.TLSConfig == nil {
+				return netDialer.DialContext(ctx, network, addr)
+			}
+			return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+		}
+	}
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 10 * runtime.NumCPU()
+	}
+	if opt.DialTimeout == 0 {
+		opt.DialTimeout = 5 * time.Second
+	}
+	switch opt.ReadTimeout {
+	case -1:
+		opt.ReadTimeout = 0
+	case 0:
+		opt.ReadTimeout = 3 * time.Second
+	}
+	switch opt.WriteTimeout {
+	case -1:
+		opt.WriteTimeout = 0
+	case 0:
+		opt.WriteTimeout = opt.ReadTimeout
+	}
+	if opt.PoolTimeout == 0 {
+		opt.PoolTimeout = opt.ReadTimeout + time.Second
+	}
+	if opt.IdleTimeout == 0 {
+		opt.IdleTimeout = 5 * time.Minute
+	}
+	if opt.IdleCheckFrequency == 0 {
+		opt.IdleCheckFrequency = time.Minute
+	}
+
+	if opt.MaxRetries == -1 {
+		opt.MaxRetries = 0
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+}
+
+func (opt *Options) clone() *Options {
+	clone := *opt
+	return &clone
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+func ParseURL(redisURL string) (*Options, error) {
+	o := &Options{Network: "tcp"}
+	u, err := url.Parse(redisURL)
+	if err != nil {
+		return nil, err
+	}
+
+	if u.Scheme != "redis" && u.Scheme != "rediss" {
+		return nil, errors.New("invalid redis URL scheme: " + u.Scheme)
+	}
+
+	if u.User != nil {
+		if p, ok := u.User.Password(); ok {
+			o.Password = p
+		}
+	}
+
+	if len(u.Query()) > 0 {
+		return nil, errors.New("no options supported")
+	}
+
+	h, p, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		h = u.Host
+	}
+	if h == "" {
+		h = "localhost"
+	}
+	if p == "" {
+		p = "6379"
+	}
+	o.Addr = net.JoinHostPort(h, p)
+
+	f := strings.FieldsFunc(u.Path, func(r rune) bool {
+		return r == '/'
+	})
+	switch len(f) {
+	case 0:
+		o.DB = 0
+	case 1:
+		if o.DB, err = strconv.Atoi(f[0]); err != nil {
+			return nil, fmt.Errorf("invalid redis database number: %q", f[0])
+		}
+	default:
+		return nil, errors.New("invalid redis URL path: " + u.Path)
+	}
+
+	if u.Scheme == "rediss" {
+		o.TLSConfig = &tls.Config{ServerName: h}
+	}
+	return o, nil
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+	return pool.NewConnPool(&pool.Options{
+		Dialer: func(ctx context.Context) (net.Conn, error) {
+			return opt.Dialer(ctx, opt.Network, opt.Addr)
+		},
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+	})
+}
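
Editor's note: ParseURL accepts redis:// and rediss:// URLs with an optional password and database number, as exercised by the tests below. A short sketch of feeding the parsed Options into NewClient; the host, port, and password here are made up for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis/v7"
)

func main() {
	// rediss:// enables TLS, ":secret" supplies a password, "/1" selects database 1.
	opt, err := redis.ParseURL("rediss://:secret@localhost:6380/1")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(opt.Addr, opt.DB, opt.TLSConfig != nil)
	// localhost:6380 1 true

	client := redis.NewClient(opt)
	defer client.Close()
}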

+ 119 - 0
options_test.go

@@ -0,0 +1,119 @@
+// +build go1.7
+
+package redis
+
+import (
+	"errors"
+	"testing"
+	"time"
+)
+
+func TestParseURL(t *testing.T) {
+	cases := []struct {
+		u    string
+		addr string
+		db   int
+		tls  bool
+		err  error
+	}{
+		{
+			"redis://localhost:123/1",
+			"localhost:123",
+			1, false, nil,
+		},
+		{
+			"redis://localhost:123",
+			"localhost:123",
+			0, false, nil,
+		},
+		{
+			"redis://localhost/1",
+			"localhost:6379",
+			1, false, nil,
+		},
+		{
+			"redis://12345",
+			"12345:6379",
+			0, false, nil,
+		},
+		{
+			"rediss://localhost:123",
+			"localhost:123",
+			0, true, nil,
+		},
+		{
+			"redis://localhost/?abc=123",
+			"",
+			0, false, errors.New("no options supported"),
+		},
+		{
+			"http://google.com",
+			"",
+			0, false, errors.New("invalid redis URL scheme: http"),
+		},
+		{
+			"redis://localhost/1/2/3/4",
+			"",
+			0, false, errors.New("invalid redis URL path: /1/2/3/4"),
+		},
+		{
+			"12345",
+			"",
+			0, false, errors.New("invalid redis URL scheme: "),
+		},
+		{
+			"redis://localhost/iamadatabase",
+			"",
+			0, false, errors.New(`invalid redis database number: "iamadatabase"`),
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.u, func(t *testing.T) {
+			o, err := ParseURL(c.u)
+			if c.err == nil && err != nil {
+				t.Fatalf("unexpected error: %q", err)
+				return
+			}
+			if c.err != nil && err != nil {
+				if c.err.Error() != err.Error() {
+					t.Fatalf("got %q, expected %q", err, c.err)
+				}
+				return
+			}
+			if o.Addr != c.addr {
+				t.Errorf("got %q, want %q", o.Addr, c.addr)
+			}
+			if o.DB != c.db {
+				t.Errorf("got %q, expected %q", o.DB, c.db)
+			}
+			if c.tls && o.TLSConfig == nil {
+				t.Errorf("got nil TLSConfig, expected a TLSConfig")
+			}
+		})
+	}
+}
+
+// TestReadTimeoutOptions tests ReadTimeout option initialization, including the
+// special values -1 and 0, and verifies that WriteTimeout falls back to the
+// ReadTimeout value when it is not explicitly set.
+func TestReadTimeoutOptions(t *testing.T) {
+	testDataInputOutputMap := map[time.Duration]time.Duration{
+		-1: 0 * time.Second,
+		0:  3 * time.Second,
+		1:  1 * time.Nanosecond,
+		3:  3 * time.Nanosecond,
+	}
+
+	for in, out := range testDataInputOutputMap {
+		o := &Options{ReadTimeout: in}
+		o.init()
+		if o.ReadTimeout != out {
+			t.Errorf("got %d instead of %d as ReadTimeout option", o.ReadTimeout, out)
+		}
+
+		if o.WriteTimeout != o.ReadTimeout {
+			t.Errorf("got %d instead of %d as WriteTimeout option", o.WriteTimeout, o.ReadTimeout)
+		}
+	}
+}

+ 142 - 0
pipeline.go

@@ -0,0 +1,142 @@
+package redis
+
+import (
+	"context"
+	"sync"
+
+	"github.com/go-redis/redis/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism for using the Redis pipelining technique.
+//
+// Pipelining can dramatically speed up processing by packing operations
+// into batches, sending them to Redis in one go, and reading all the
+// replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that Pipeline is not a transaction, so you can get unexpected
+// results with large pipelines and small read/write timeouts.
+// The Redis client retries commands on timeouts, so a pipeline can be
+// retransmitted and its commands executed more than once.
+// To avoid this, use read/write timeouts that are reasonably large for
+// your batch size, and/or use TxPipeline.
+type Pipeliner interface {
+	StatefulCmdable
+	Do(args ...interface{}) *Cmd
+	Process(cmd Cmder) error
+	Close() error
+	Discard() error
+	Exec() ([]Cmder, error)
+	ExecContext(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+	cmdable
+	statefulCmdable
+
+	ctx  context.Context
+	exec pipelineExecer
+
+	mu     sync.Mutex
+	cmds   []Cmder
+	closed bool
+}
+
+func (c *Pipeline) init() {
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+}
+
+func (c *Pipeline) Do(args ...interface{}) *Cmd {
+	cmd := NewCmd(args...)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(cmd Cmder) error {
+	c.mu.Lock()
+	c.cmds = append(c.cmds, cmd)
+	c.mu.Unlock()
+	return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+	c.mu.Lock()
+	_ = c.discard()
+	c.closed = true
+	c.mu.Unlock()
+	return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+	c.mu.Lock()
+	err := c.discard()
+	c.mu.Unlock()
+	return err
+}
+
+func (c *Pipeline) discard() error {
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.cmds = c.cmds[:0]
+	return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first failed
+// command, if any.
+func (c *Pipeline) Exec() ([]Cmder, error) {
+	return c.ExecContext(c.ctx)
+}
+
+func (c *Pipeline) ExecContext(ctx context.Context) ([]Cmder, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	if len(c.cmds) == 0 {
+		return nil, nil
+	}
+
+	cmds := c.cmds
+	c.cmds = nil
+
+	return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	if err := fn(c); err != nil {
+		return nil, err
+	}
+	cmds, err := c.Exec()
+	_ = c.Close()
+	return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+	return c
+}
+
+func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipelined(fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+	return c
+}
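
Editor's note: as the Pipeliner comment explains, commands are queued locally and sent in one round trip when Exec runs. A minimal usage sketch against the v7 API, assuming a server on localhost:6379 (the tests below cover the same behaviour in more detail):

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Both commands are queued locally and sent in a single round trip.
	var incr *redis.IntCmd
	cmds, err := client.Pipelined(func(pipe redis.Pipeliner) error {
		pipe.Set("pipeline_counter", 0, 0)
		incr = pipe.Incr("pipeline_counter")
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(len(cmds), incr.Val()) // 2 1
}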

+ 87 - 0
pipeline_test.go

@@ -0,0 +1,87 @@
+package redis_test
+
+import (
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("pipelining", func() {
+	var client *redis.Client
+	var pipe *redis.Pipeline
+
+	BeforeEach(func() {
+		client = redis.NewClient(redisOptions())
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("supports block style", func() {
+		var get *redis.StringCmd
+		cmds, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+			get = pipe.Get("foo")
+			return nil
+		})
+		Expect(err).To(Equal(redis.Nil))
+		Expect(cmds).To(HaveLen(1))
+		Expect(cmds[0]).To(Equal(get))
+		Expect(get.Err()).To(Equal(redis.Nil))
+		Expect(get.Val()).To(Equal(""))
+	})
+
+	assertPipeline := func() {
+		It("returns no errors when there are no commands", func() {
+			_, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		It("discards queued commands", func() {
+			pipe.Get("key")
+			pipe.Discard()
+			cmds, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(BeNil())
+		})
+
+		It("handles val/err", func() {
+			err := client.Set("key", "value", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+
+			get := pipe.Get("key")
+			cmds, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(HaveLen(1))
+
+			val, err := get.Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("value"))
+		})
+
+		It("supports custom command", func() {
+			pipe.Do("ping")
+			cmds, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(HaveLen(1))
+		})
+	}
+
+	Describe("Pipeline", func() {
+		BeforeEach(func() {
+			pipe = client.Pipeline().(*redis.Pipeline)
+		})
+
+		assertPipeline()
+	})
+
+	Describe("TxPipeline", func() {
+		BeforeEach(func() {
+			pipe = client.TxPipeline().(*redis.Pipeline)
+		})
+
+		assertPipeline()
+	})
+})

+ 150 - 0
pool_test.go

@@ -0,0 +1,150 @@
+package redis_test
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("pool", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		opt := redisOptions()
+		opt.MinIdleConns = 0
+		opt.MaxConnAge = 0
+		opt.IdleTimeout = time.Second
+		client = redis.NewClient(opt)
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("respects max size", func() {
+		perform(1000, func(id int) {
+			val, err := client.Ping().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("PONG"))
+		})
+
+		pool := client.Pool()
+		Expect(pool.Len()).To(BeNumerically("<=", 10))
+		Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+		Expect(pool.Len()).To(Equal(pool.IdleLen()))
+	})
+
+	It("respects max size on multi", func() {
+		perform(1000, func(id int) {
+			var ping *redis.StatusCmd
+
+			err := client.Watch(func(tx *redis.Tx) error {
+				cmds, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+					ping = pipe.Ping()
+					return nil
+				})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(cmds).To(HaveLen(1))
+				return err
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(ping.Err()).NotTo(HaveOccurred())
+			Expect(ping.Val()).To(Equal("PONG"))
+		})
+
+		pool := client.Pool()
+		Expect(pool.Len()).To(BeNumerically("<=", 10))
+		Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+		Expect(pool.Len()).To(Equal(pool.IdleLen()))
+	})
+
+	It("respects max size on pipelines", func() {
+		perform(1000, func(id int) {
+			pipe := client.Pipeline()
+			ping := pipe.Ping()
+			cmds, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(HaveLen(1))
+			Expect(ping.Err()).NotTo(HaveOccurred())
+			Expect(ping.Val()).To(Equal("PONG"))
+			Expect(pipe.Close()).NotTo(HaveOccurred())
+		})
+
+		pool := client.Pool()
+		Expect(pool.Len()).To(BeNumerically("<=", 10))
+		Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+		Expect(pool.Len()).To(Equal(pool.IdleLen()))
+	})
+
+	It("removes broken connections", func() {
+		cn, err := client.Pool().Get(context.Background())
+		Expect(err).NotTo(HaveOccurred())
+		cn.SetNetConn(&badConn{})
+		client.Pool().Put(cn)
+
+		err = client.Ping().Err()
+		Expect(err).To(MatchError("bad connection"))
+
+		val, err := client.Ping().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal("PONG"))
+
+		pool := client.Pool()
+		Expect(pool.Len()).To(Equal(1))
+		Expect(pool.IdleLen()).To(Equal(1))
+
+		stats := pool.Stats()
+		Expect(stats.Hits).To(Equal(uint32(1)))
+		Expect(stats.Misses).To(Equal(uint32(2)))
+		Expect(stats.Timeouts).To(Equal(uint32(0)))
+	})
+
+	It("reuses connections", func() {
+		for i := 0; i < 100; i++ {
+			val, err := client.Ping().Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(val).To(Equal("PONG"))
+		}
+
+		pool := client.Pool()
+		Expect(pool.Len()).To(Equal(1))
+		Expect(pool.IdleLen()).To(Equal(1))
+
+		stats := pool.Stats()
+		Expect(stats.Hits).To(Equal(uint32(99)))
+		Expect(stats.Misses).To(Equal(uint32(1)))
+		Expect(stats.Timeouts).To(Equal(uint32(0)))
+	})
+
+	It("removes idle connections", func() {
+		err := client.Ping().Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		stats := client.PoolStats()
+		Expect(stats).To(Equal(&redis.PoolStats{
+			Hits:       0,
+			Misses:     1,
+			Timeouts:   0,
+			TotalConns: 1,
+			IdleConns:  1,
+			StaleConns: 0,
+		}))
+
+		time.Sleep(2 * time.Second)
+
+		stats = client.PoolStats()
+		Expect(stats).To(Equal(&redis.PoolStats{
+			Hits:       0,
+			Misses:     1,
+			Timeouts:   0,
+			TotalConns: 0,
+			IdleConns:  0,
+			StaleConns: 1,
+		}))
+	})
+})

+ 593 - 0
pubsub.go

@@ -0,0 +1,593 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/internal"
+	"github.com/go-redis/redis/internal/pool"
+	"github.com/go-redis/redis/internal/proto"
+)
+
+const pingTimeout = 30 * time.Second
+
+var errPingTimeout = errors.New("redis: ping timeout")
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to Redis Server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+	opt *Options
+
+	newConn   func([]string) (*pool.Conn, error)
+	closeConn func(*pool.Conn) error
+
+	mu       sync.Mutex
+	cn       *pool.Conn
+	channels map[string]struct{}
+	patterns map[string]struct{}
+
+	closed bool
+	exit   chan struct{}
+
+	cmd *Cmd
+
+	chOnce sync.Once
+	msgCh  chan *Message
+	allCh  chan interface{}
+	ping   chan struct{}
+}
+
+func (c *PubSub) String() string {
+	channels := mapKeys(c.channels)
+	channels = append(channels, mapKeys(c.patterns)...)
+	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) init() {
+	c.exit = make(chan struct{})
+}
+
+func (c *PubSub) connWithLock() (*pool.Conn, error) {
+	c.mu.Lock()
+	cn, err := c.conn(nil)
+	c.mu.Unlock()
+	return cn, err
+}
+
+func (c *PubSub) conn(newChannels []string) (*pool.Conn, error) {
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+	if c.cn != nil {
+		return c.cn, nil
+	}
+
+	channels := mapKeys(c.channels)
+	channels = append(channels, newChannels...)
+
+	cn, err := c.newConn(channels)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := c.resubscribe(cn); err != nil {
+		_ = c.closeConn(cn)
+		return nil, err
+	}
+
+	c.cn = cn
+	return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+	return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmd(wr, cmd)
+	})
+}
+
+func (c *PubSub) resubscribe(cn *pool.Conn) error {
+	var firstErr error
+
+	if len(c.channels) > 0 {
+		firstErr = c._subscribe(cn, "subscribe", mapKeys(c.channels))
+	}
+
+	if len(c.patterns) > 0 {
+		err := c._subscribe(cn, "psubscribe", mapKeys(c.patterns))
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+	s := make([]string, len(m))
+	i := 0
+	for k := range m {
+		s[i] = k
+		i++
+	}
+	return s
+}
+
+func (c *PubSub) _subscribe(
+	cn *pool.Conn, redisCmd string, channels []string,
+) error {
+	args := make([]interface{}, 0, 1+len(channels))
+	args = append(args, redisCmd)
+	for _, channel := range channels {
+		args = append(args, channel)
+	}
+	cmd := NewSliceCmd(args...)
+	return c.writeCmd(context.TODO(), cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(cn *pool.Conn, err error, allowTimeout bool) {
+	c.mu.Lock()
+	c.releaseConn(cn, err, allowTimeout)
+	c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(cn *pool.Conn, err error, allowTimeout bool) {
+	if c.cn != cn {
+		return
+	}
+	if isBadConn(err, allowTimeout) {
+		c.reconnect(err)
+	}
+}
+
+func (c *PubSub) reconnect(reason error) {
+	_ = c.closeTheCn(reason)
+	_, _ = c.conn(nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+	if c.cn == nil {
+		return nil
+	}
+	if !c.closed {
+		internal.Logger.Printf("redis: discarding bad PubSub connection: %s", reason)
+	}
+	err := c.closeConn(c.cn)
+	c.cn = nil
+	return err
+}
+
+func (c *PubSub) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.closed = true
+	close(c.exit)
+
+	return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe the client to the specified channels. It returns
+// an empty subscription if there are no channels.
+func (c *PubSub) Subscribe(channels ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	err := c.subscribe("subscribe", channels...)
+	if c.channels == nil {
+		c.channels = make(map[string]struct{})
+	}
+	for _, s := range channels {
+		c.channels[s] = struct{}{}
+	}
+	return err
+}
+
+// PSubscribe the client to the given patterns. It returns
+// an empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(patterns ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	err := c.subscribe("psubscribe", patterns...)
+	if c.patterns == nil {
+		c.patterns = make(map[string]struct{})
+	}
+	for _, s := range patterns {
+		c.patterns[s] = struct{}{}
+	}
+	return err
+}
+
+// Unsubscribe the client from the given channels, or from all of
+// them if none is given.
+func (c *PubSub) Unsubscribe(channels ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	for _, channel := range channels {
+		delete(c.channels, channel)
+	}
+	err := c.subscribe("unsubscribe", channels...)
+	return err
+}
+
+// PUnsubscribe the client from the given patterns, or from all of
+// them if none is given.
+func (c *PubSub) PUnsubscribe(patterns ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	for _, pattern := range patterns {
+		delete(c.patterns, pattern)
+	}
+	err := c.subscribe("punsubscribe", patterns...)
+	return err
+}
+
+func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
+	cn, err := c.conn(channels)
+	if err != nil {
+		return err
+	}
+
+	err = c._subscribe(cn, redisCmd, channels)
+	c.releaseConn(cn, err, false)
+	return err
+}
+
+func (c *PubSub) Ping(payload ...string) error {
+	args := []interface{}{"ping"}
+	if len(payload) == 1 {
+		args = append(args, payload[0])
+	}
+	cmd := NewCmd(args...)
+
+	cn, err := c.connWithLock()
+	if err != nil {
+		return err
+	}
+
+	err = c.writeCmd(context.TODO(), cn, cmd)
+	c.releaseConnWithLock(cn, err, false)
+	return err
+}
+
+// Subscription received after a successful subscription to a channel.
+type Subscription struct {
+	// Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+	Kind string
+	// Channel name we have subscribed to.
+	Channel string
+	// Number of channels we are currently subscribed to.
+	Count int
+}
+
+func (m *Subscription) String() string {
+	return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as result of a PUBLISH command issued by another client.
+type Message struct {
+	Channel string
+	Pattern string
+	Payload string
+}
+
+func (m *Message) String() string {
+	return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as result of a PING command issued by another client.
+type Pong struct {
+	Payload string
+}
+
+func (p *Pong) String() string {
+	if p.Payload != "" {
+		return fmt.Sprintf("Pong<%s>", p.Payload)
+	}
+	return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+	switch reply := reply.(type) {
+	case string:
+		return &Pong{
+			Payload: reply,
+		}, nil
+	case []interface{}:
+		switch kind := reply[0].(string); kind {
+		case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+			return &Subscription{
+				Kind:    kind,
+				Channel: reply[1].(string),
+				Count:   int(reply[2].(int64)),
+			}, nil
+		case "message":
+			return &Message{
+				Channel: reply[1].(string),
+				Payload: reply[2].(string),
+			}, nil
+		case "pmessage":
+			return &Message{
+				Pattern: reply[1].(string),
+				Channel: reply[2].(string),
+				Payload: reply[3].(string),
+			}, nil
+		case "pong":
+			return &Pong{
+				Payload: reply[1].(string),
+			}, nil
+		default:
+			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+		}
+	default:
+		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+	}
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
+	if c.cmd == nil {
+		c.cmd = NewCmd()
+	}
+
+	cn, err := c.connWithLock()
+	if err != nil {
+		return nil, err
+	}
+
+	err = cn.WithReader(context.TODO(), timeout, func(rd *proto.Reader) error {
+		return c.cmd.readReply(rd)
+	})
+
+	c.releaseConnWithLock(cn, err, timeout > 0)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive() (interface{}, error) {
+	return c.ReceiveTimeout(0)
+}
+
+// ReceiveMessage returns a Message or an error, ignoring Subscription and Pong
+// messages. This is a low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage() (*Message, error) {
+	for {
+		msg, err := c.Receive()
+		if err != nil {
+			return nil, err
+		}
+
+		switch msg := msg.(type) {
+		case *Subscription:
+			// Ignore.
+		case *Pong:
+			// Ignore.
+		case *Message:
+			return msg, nil
+		default:
+			err := fmt.Errorf("redis: unknown message: %T", msg)
+			return nil, err
+		}
+	}
+}
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is blocked (full) for 30 seconds, the message is dropped.
+// Receive* APIs can not be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health and
+// reconnects and re-subscribes if a ping reply is not received within 30 seconds.
+func (c *PubSub) Channel() <-chan *Message {
+	return c.ChannelSize(100)
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.initPing()
+		c.initMsgChan(size)
+	})
+	if c.msgCh == nil {
+		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+		panic(err)
+	}
+	if cap(c.msgCh) != size {
+		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+		panic(err)
+	}
+	return c.msgCh
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(size int) <-chan interface{} {
+	c.chOnce.Do(func() {
+		c.initPing()
+		c.initAllChan(size)
+	})
+	if c.allCh == nil {
+		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+		panic(err)
+	}
+	if cap(c.allCh) != size {
+		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+		panic(err)
+	}
+	return c.allCh
+}
+
+func (c *PubSub) initPing() {
+	c.ping = make(chan struct{}, 1)
+	go func() {
+		timer := time.NewTimer(pingTimeout)
+		timer.Stop()
+
+		healthy := true
+		for {
+			timer.Reset(pingTimeout)
+			select {
+			case <-c.ping:
+				healthy = true
+				if !timer.Stop() {
+					<-timer.C
+				}
+			case <-timer.C:
+				pingErr := c.Ping()
+				if healthy {
+					healthy = false
+				} else {
+					if pingErr == nil {
+						pingErr = errPingTimeout
+					}
+					c.mu.Lock()
+					c.reconnect(pingErr)
+					healthy = true
+					c.mu.Unlock()
+				}
+			case <-c.exit:
+				return
+			}
+		}
+	}()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *PubSub) initMsgChan(size int) {
+	c.msgCh = make(chan *Message, size)
+	go func() {
+		timer := time.NewTimer(pingTimeout)
+		timer.Stop()
+
+		var errCount int
+		for {
+			msg, err := c.Receive()
+			if err != nil {
+				if err == pool.ErrClosed {
+					close(c.msgCh)
+					return
+				}
+				if errCount > 0 {
+					time.Sleep(c.retryBackoff(errCount))
+				}
+				errCount++
+				continue
+			}
+
+			errCount = 0
+
+			// Any message is as good as a ping.
+			select {
+			case c.ping <- struct{}{}:
+			default:
+			}
+
+			switch msg := msg.(type) {
+			case *Subscription:
+				// Ignore.
+			case *Pong:
+				// Ignore.
+			case *Message:
+				timer.Reset(pingTimeout)
+				select {
+				case c.msgCh <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logger.Printf(
+						"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
+				}
+			default:
+				internal.Logger.Printf("redis: unknown message type: %T", msg)
+			}
+		}
+	}()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *PubSub) initAllChan(size int) {
+	c.allCh = make(chan interface{}, size)
+	go func() {
+		timer := time.NewTimer(pingTimeout)
+		timer.Stop()
+
+		var errCount int
+		for {
+			msg, err := c.Receive()
+			if err != nil {
+				if err == pool.ErrClosed {
+					close(c.allCh)
+					return
+				}
+				if errCount > 0 {
+					time.Sleep(c.retryBackoff(errCount))
+				}
+				errCount++
+				continue
+			}
+
+			errCount = 0
+
+			// Any message is as good as a ping.
+			select {
+			case c.ping <- struct{}{}:
+			default:
+			}
+
+			switch msg := msg.(type) {
+			case *Subscription:
+				c.sendMessage(msg, timer)
+			case *Pong:
+				// Ignore.
+			case *Message:
+				c.sendMessage(msg, timer)
+			default:
+				internal.Logger.Printf("redis: unknown message type: %T", msg)
+			}
+		}
+	}()
+}
+
+func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
+	timer.Reset(pingTimeout)
+	select {
+	case c.allCh <- msg:
+		if !timer.Stop() {
+			<-timer.C
+		}
+	case <-timer.C:
+		internal.Logger.Printf(
+			"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
+	}
+}
+
+func (c *PubSub) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
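
Editor's note: putting the pieces above together, Subscribe opens the connection, Receive (or ReceiveTimeout) can confirm the subscription, and Channel starts the ping/receive goroutines that deliver messages on a Go channel. A short usage sketch, assuming a server on localhost:6379:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	pubsub := client.Subscribe("mychannel")
	defer pubsub.Close()

	// Wait for the subscription confirmation before publishing.
	if _, err := pubsub.Receive(); err != nil {
		log.Fatal(err)
	}

	// Channel starts the receive loop; messages are delivered on ch.
	ch := pubsub.Channel()

	if err := client.Publish("mychannel", "hello").Err(); err != nil {
		log.Fatal(err)
	}

	select {
	case msg := <-ch:
		fmt.Println(msg.Channel, msg.Payload) // mychannel hello
	case <-time.After(time.Second):
		fmt.Println("no message received")
	}
}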

+ 446 - 0
pubsub_test.go

@@ -0,0 +1,446 @@
+package redis_test
+
+import (
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("PubSub", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		opt := redisOptions()
+		opt.MinIdleConns = 0
+		opt.MaxConnAge = 0
+		client = redis.NewClient(opt)
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("implements Stringer", func() {
+		pubsub := client.PSubscribe("mychannel*")
+		defer pubsub.Close()
+
+		Expect(pubsub.String()).To(Equal("PubSub(mychannel*)"))
+	})
+
+	It("should support pattern matching", func() {
+		pubsub := client.PSubscribe("mychannel*")
+		defer pubsub.Close()
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			subscr := msgi.(*redis.Subscription)
+			Expect(subscr.Kind).To(Equal("psubscribe"))
+			Expect(subscr.Channel).To(Equal("mychannel*"))
+			Expect(subscr.Count).To(Equal(1))
+		}
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err.(net.Error).Timeout()).To(Equal(true))
+			Expect(msgi).To(BeNil())
+		}
+
+		n, err := client.Publish("mychannel1", "hello").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(1)))
+
+		Expect(pubsub.PUnsubscribe("mychannel*")).NotTo(HaveOccurred())
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			subscr := msgi.(*redis.Message)
+			Expect(subscr.Channel).To(Equal("mychannel1"))
+			Expect(subscr.Pattern).To(Equal("mychannel*"))
+			Expect(subscr.Payload).To(Equal("hello"))
+		}
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			subscr := msgi.(*redis.Subscription)
+			Expect(subscr.Kind).To(Equal("punsubscribe"))
+			Expect(subscr.Channel).To(Equal("mychannel*"))
+			Expect(subscr.Count).To(Equal(0))
+		}
+
+		stats := client.PoolStats()
+		Expect(stats.Misses).To(Equal(uint32(1)))
+	})
+
+	It("should pub/sub channels", func() {
+		channels, err := client.PubSubChannels("mychannel*").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(channels).To(BeEmpty())
+
+		pubsub := client.Subscribe("mychannel", "mychannel2")
+		defer pubsub.Close()
+
+		channels, err = client.PubSubChannels("mychannel*").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(channels).To(ConsistOf([]string{"mychannel", "mychannel2"}))
+
+		channels, err = client.PubSubChannels("").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(channels).To(BeEmpty())
+
+		channels, err = client.PubSubChannels("*").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(len(channels)).To(BeNumerically(">=", 2))
+	})
+
+	It("should return the numbers of subscribers", func() {
+		pubsub := client.Subscribe("mychannel", "mychannel2")
+		defer pubsub.Close()
+
+		channels, err := client.PubSubNumSub("mychannel", "mychannel2", "mychannel3").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(channels).To(Equal(map[string]int64{
+			"mychannel":  1,
+			"mychannel2": 1,
+			"mychannel3": 0,
+		}))
+	})
+
+	It("should return the numbers of subscribers by pattern", func() {
+		num, err := client.PubSubNumPat().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(num).To(Equal(int64(0)))
+
+		pubsub := client.PSubscribe("*")
+		defer pubsub.Close()
+
+		num, err = client.PubSubNumPat().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(num).To(Equal(int64(1)))
+	})
+
+	It("should pub/sub", func() {
+		pubsub := client.Subscribe("mychannel", "mychannel2")
+		defer pubsub.Close()
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			subscr := msgi.(*redis.Subscription)
+			Expect(subscr.Kind).To(Equal("subscribe"))
+			Expect(subscr.Channel).To(Equal("mychannel"))
+			Expect(subscr.Count).To(Equal(1))
+		}
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			subscr := msgi.(*redis.Subscription)
+			Expect(subscr.Kind).To(Equal("subscribe"))
+			Expect(subscr.Channel).To(Equal("mychannel2"))
+			Expect(subscr.Count).To(Equal(2))
+		}
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err.(net.Error).Timeout()).To(Equal(true))
+			Expect(msgi).NotTo(HaveOccurred())
+		}
+
+		n, err := client.Publish("mychannel", "hello").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(1)))
+
+		n, err = client.Publish("mychannel2", "hello2").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(1)))
+
+		Expect(pubsub.Unsubscribe("mychannel", "mychannel2")).NotTo(HaveOccurred())
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			msg := msgi.(*redis.Message)
+			Expect(msg.Channel).To(Equal("mychannel"))
+			Expect(msg.Payload).To(Equal("hello"))
+		}
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			msg := msgi.(*redis.Message)
+			Expect(msg.Channel).To(Equal("mychannel2"))
+			Expect(msg.Payload).To(Equal("hello2"))
+		}
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			subscr := msgi.(*redis.Subscription)
+			Expect(subscr.Kind).To(Equal("unsubscribe"))
+			Expect(subscr.Channel).To(Equal("mychannel"))
+			Expect(subscr.Count).To(Equal(1))
+		}
+
+		{
+			msgi, err := pubsub.ReceiveTimeout(time.Second)
+			Expect(err).NotTo(HaveOccurred())
+			subscr := msgi.(*redis.Subscription)
+			Expect(subscr.Kind).To(Equal("unsubscribe"))
+			Expect(subscr.Channel).To(Equal("mychannel2"))
+			Expect(subscr.Count).To(Equal(0))
+		}
+
+		stats := client.PoolStats()
+		Expect(stats.Misses).To(Equal(uint32(1)))
+	})
+
+	It("should ping/pong", func() {
+		pubsub := client.Subscribe("mychannel")
+		defer pubsub.Close()
+
+		_, err := pubsub.ReceiveTimeout(time.Second)
+		Expect(err).NotTo(HaveOccurred())
+
+		err = pubsub.Ping("")
+		Expect(err).NotTo(HaveOccurred())
+
+		msgi, err := pubsub.ReceiveTimeout(time.Second)
+		Expect(err).NotTo(HaveOccurred())
+		pong := msgi.(*redis.Pong)
+		Expect(pong.Payload).To(Equal(""))
+	})
+
+	It("should ping/pong with payload", func() {
+		pubsub := client.Subscribe("mychannel")
+		defer pubsub.Close()
+
+		_, err := pubsub.ReceiveTimeout(time.Second)
+		Expect(err).NotTo(HaveOccurred())
+
+		err = pubsub.Ping("hello")
+		Expect(err).NotTo(HaveOccurred())
+
+		msgi, err := pubsub.ReceiveTimeout(time.Second)
+		Expect(err).NotTo(HaveOccurred())
+		pong := msgi.(*redis.Pong)
+		Expect(pong.Payload).To(Equal("hello"))
+	})
+
+	It("should multi-ReceiveMessage", func() {
+		pubsub := client.Subscribe("mychannel")
+		defer pubsub.Close()
+
+		subscr, err := pubsub.ReceiveTimeout(time.Second)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(subscr).To(Equal(&redis.Subscription{
+			Kind:    "subscribe",
+			Channel: "mychannel",
+			Count:   1,
+		}))
+
+		err = client.Publish("mychannel", "hello").Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		err = client.Publish("mychannel", "world").Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		msg, err := pubsub.ReceiveMessage()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(msg.Channel).To(Equal("mychannel"))
+		Expect(msg.Payload).To(Equal("hello"))
+
+		msg, err = pubsub.ReceiveMessage()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(msg.Channel).To(Equal("mychannel"))
+		Expect(msg.Payload).To(Equal("world"))
+	})
+
+	It("returns an error when subscribe fails", func() {
+		pubsub := client.Subscribe()
+		defer pubsub.Close()
+
+		pubsub.SetNetConn(&badConn{
+			readErr:  io.EOF,
+			writeErr: io.EOF,
+		})
+
+		err := pubsub.Subscribe("mychannel")
+		Expect(err).To(MatchError("EOF"))
+
+		err = pubsub.Subscribe("mychannel")
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	expectReceiveMessageOnError := func(pubsub *redis.PubSub) {
+		pubsub.SetNetConn(&badConn{
+			readErr:  io.EOF,
+			writeErr: io.EOF,
+		})
+
+		step := make(chan struct{}, 3)
+
+		go func() {
+			defer GinkgoRecover()
+
+			Eventually(step).Should(Receive())
+			err := client.Publish("mychannel", "hello").Err()
+			Expect(err).NotTo(HaveOccurred())
+			step <- struct{}{}
+		}()
+
+		_, err := pubsub.ReceiveMessage()
+		Expect(err).To(Equal(io.EOF))
+		step <- struct{}{}
+
+		msg, err := pubsub.ReceiveMessage()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(msg.Channel).To(Equal("mychannel"))
+		Expect(msg.Payload).To(Equal("hello"))
+
+		Eventually(step).Should(Receive())
+	}
+
+	It("Subscribe should reconnect on ReceiveMessage error", func() {
+		pubsub := client.Subscribe("mychannel")
+		defer pubsub.Close()
+
+		subscr, err := pubsub.ReceiveTimeout(time.Second)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(subscr).To(Equal(&redis.Subscription{
+			Kind:    "subscribe",
+			Channel: "mychannel",
+			Count:   1,
+		}))
+
+		expectReceiveMessageOnError(pubsub)
+	})
+
+	It("PSubscribe should reconnect on ReceiveMessage error", func() {
+		pubsub := client.PSubscribe("mychannel")
+		defer pubsub.Close()
+
+		subscr, err := pubsub.ReceiveTimeout(time.Second)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(subscr).To(Equal(&redis.Subscription{
+			Kind:    "psubscribe",
+			Channel: "mychannel",
+			Count:   1,
+		}))
+
+		expectReceiveMessageOnError(pubsub)
+	})
+
+	It("should return on Close", func() {
+		pubsub := client.Subscribe("mychannel")
+		defer pubsub.Close()
+
+		var wg sync.WaitGroup
+		wg.Add(1)
+		go func() {
+			defer GinkgoRecover()
+
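+			// The first Done releases the Wait below so the test can call
+			// Close; the deferred Done releases the final Wait once
+			// ReceiveMessage returns.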
+			wg.Done()
+			defer wg.Done()
+
+			_, err := pubsub.ReceiveMessage()
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(SatisfyAny(
+				Equal("redis: client is closed"),
+				ContainSubstring("use of closed network connection"),
+			))
+		}()
+
+		wg.Wait()
+		wg.Add(1)
+
+		Expect(pubsub.Close()).NotTo(HaveOccurred())
+
+		wg.Wait()
+	})
+
+	It("should ReceiveMessage without a subscription", func() {
+		timeout := 100 * time.Millisecond
+
+		pubsub := client.Subscribe()
+		defer pubsub.Close()
+
+		var wg sync.WaitGroup
+		wg.Add(1)
+		go func() {
+			defer GinkgoRecover()
+			defer wg.Done()
+
+			time.Sleep(timeout)
+
+			err := pubsub.Subscribe("mychannel")
+			Expect(err).NotTo(HaveOccurred())
+
+			time.Sleep(timeout)
+
+			err = client.Publish("mychannel", "hello").Err()
+			Expect(err).NotTo(HaveOccurred())
+		}()
+
+		msg, err := pubsub.ReceiveMessage()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(msg.Channel).To(Equal("mychannel"))
+		Expect(msg.Payload).To(Equal("hello"))
+
+		wg.Wait()
+	})
+
+	It("handles big message payload", func() {
+		pubsub := client.Subscribe("mychannel")
+		defer pubsub.Close()
+
+		ch := pubsub.Channel()
+
+		bigVal := bigVal()
+		err := client.Publish("mychannel", bigVal).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		var msg *redis.Message
+		Eventually(ch).Should(Receive(&msg))
+		Expect(msg.Channel).To(Equal("mychannel"))
+		Expect(msg.Payload).To(Equal(string(bigVal)))
+	})
+
+	It("supports concurrent Ping and Receive", func() {
+		const N = 100
+
+		pubsub := client.Subscribe("mychannel")
+		defer pubsub.Close()
+
+		done := make(chan struct{})
+		go func() {
+			defer GinkgoRecover()
+
+			for i := 0; i < N; i++ {
+				_, err := pubsub.ReceiveTimeout(5 * time.Second)
+				Expect(err).NotTo(HaveOccurred())
+			}
+			close(done)
+		}()
+
+		for i := 0; i < N; i++ {
+			err := pubsub.Ping()
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		select {
+		case <-done:
+		case <-time.After(30 * time.Second):
+			Fail("timeout")
+		}
+	})
+})
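
The pubsub tests above exercise ReceiveTimeout, Ping, ReceiveMessage and Channel end to end. For orientation, a minimal stand-alone consumer built on the same v7 API could look like the sketch below; the address and channel name are assumptions, not values taken from the test suite.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer client.Close()

	pubsub := client.Subscribe("mychannel")
	defer pubsub.Close()

	// Wait for the subscription confirmation before publishing.
	if _, err := pubsub.Receive(); err != nil {
		panic(err)
	}

	if err := client.Publish("mychannel", "hello").Err(); err != nil {
		panic(err)
	}

	// Channel delivers *redis.Message values until the PubSub is closed.
	msg := <-pubsub.Channel()
	fmt.Println(msg.Channel, msg.Payload)
}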

+ 355 - 0
race_test.go

@@ -0,0 +1,355 @@
+package redis_test
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("races", func() {
+	var client *redis.Client
+	var C, N int
+
+	BeforeEach(func() {
+		client = redis.NewClient(redisOptions())
+		Expect(client.FlushDB().Err()).To(BeNil())
+
+		C, N = 10, 1000
+		if testing.Short() {
+			C = 4
+			N = 100
+		}
+	})
+
+	AfterEach(func() {
+		err := client.Close()
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	It("should echo", func() {
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				msg := fmt.Sprintf("echo %d %d", id, i)
+				echo, err := client.Echo(msg).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(echo).To(Equal(msg))
+			}
+		})
+	})
+
+	It("should incr", func() {
+		key := "TestIncrFromGoroutines"
+
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				err := client.Incr(key).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+
+		val, err := client.Get(key).Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal(int64(C * N)))
+	})
+
+	It("should handle many keys", func() {
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				err := client.Set(
+					fmt.Sprintf("keys.key-%d-%d", id, i),
+					fmt.Sprintf("hello-%d-%d", id, i),
+					0,
+				).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+
+		keys := client.Keys("keys.*")
+		Expect(keys.Err()).NotTo(HaveOccurred())
+		Expect(len(keys.Val())).To(Equal(C * N))
+	})
+
+	It("should handle many keys 2", func() {
+		perform(C, func(id int) {
+			keys := []string{"non-existent-key"}
+			for i := 0; i < N; i++ {
+				key := fmt.Sprintf("keys.key-%d", i)
+				keys = append(keys, key)
+
+				err := client.Set(key, fmt.Sprintf("hello-%d", i), 0).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+			keys = append(keys, "non-existent-key")
+
+			vals, err := client.MGet(keys...).Result()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(len(vals)).To(Equal(N + 2))
+
+			for i := 0; i < N; i++ {
+				Expect(vals[i+1]).To(Equal(fmt.Sprintf("hello-%d", i)))
+			}
+
+			Expect(vals[0]).To(BeNil())
+			Expect(vals[N+1]).To(BeNil())
+		})
+	})
+
+	It("should handle big vals in Get", func() {
+		C, N = 4, 100
+
+		bigVal := bigVal()
+
+		err := client.Set("key", bigVal, 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		// Reconnect to get new connection.
+		Expect(client.Close()).To(BeNil())
+		client = redis.NewClient(redisOptions())
+
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				got, err := client.Get("key").Bytes()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(got).To(Equal(bigVal))
+			}
+		})
+	})
+
+	It("should handle big vals in Set", func() {
+		C, N = 4, 100
+
+		bigVal := bigVal()
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				err := client.Set("key", bigVal, 0).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+	})
+
+	It("should select db", func() {
+		err := client.Set("db", 1, 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		perform(C, func(id int) {
+			opt := redisOptions()
+			opt.DB = id
+			client := redis.NewClient(opt)
+			for i := 0; i < N; i++ {
+				err := client.Set("db", id, 0).Err()
+				Expect(err).NotTo(HaveOccurred())
+
+				n, err := client.Get("db").Int64()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(n).To(Equal(int64(id)))
+			}
+			err := client.Close()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		n, err := client.Get("db").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(1)))
+	})
+
+	It("should select DB with read timeout", func() {
+		perform(C, func(id int) {
+			opt := redisOptions()
+			opt.DB = id
+			opt.ReadTimeout = time.Nanosecond
+			client := redis.NewClient(opt)
+
+			perform(C, func(id int) {
+				err := client.Ping().Err()
+				Expect(err).To(HaveOccurred())
+				Expect(err.(net.Error).Timeout()).To(BeTrue())
+			})
+
+			err := client.Close()
+			Expect(err).NotTo(HaveOccurred())
+		})
+	})
+
+	It("should Watch/Unwatch", func() {
+		err := client.Set("key", "0", 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				err := client.Watch(func(tx *redis.Tx) error {
+					val, err := tx.Get("key").Result()
+					Expect(err).NotTo(HaveOccurred())
+					Expect(val).NotTo(Equal(redis.Nil))
+
+					num, err := strconv.ParseInt(val, 10, 64)
+					Expect(err).NotTo(HaveOccurred())
+
+					cmds, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+						pipe.Set("key", strconv.FormatInt(num+1, 10), 0)
+						return nil
+					})
+					Expect(cmds).To(HaveLen(1))
+					return err
+				}, "key")
+				if err == redis.TxFailedErr {
+					i--
+					continue
+				}
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+
+		val, err := client.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal(int64(C * N)))
+	})
+
+	It("should Pipeline", func() {
+		perform(C, func(id int) {
+			pipe := client.Pipeline()
+			for i := 0; i < N; i++ {
+				pipe.Echo(fmt.Sprint(i))
+			}
+
+			cmds, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(HaveLen(N))
+
+			for i := 0; i < N; i++ {
+				Expect(cmds[i].(*redis.StringCmd).Val()).To(Equal(fmt.Sprint(i)))
+			}
+		})
+	})
+
+	It("should Pipeline", func() {
+		pipe := client.Pipeline()
+		perform(N, func(id int) {
+			pipe.Incr("key")
+		})
+
+		cmds, err := pipe.Exec()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(cmds).To(HaveLen(N))
+
+		n, err := client.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(N)))
+	})
+
+	It("should TxPipeline", func() {
+		pipe := client.TxPipeline()
+		perform(N, func(id int) {
+			pipe.Incr("key")
+		})
+
+		cmds, err := pipe.Exec()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(cmds).To(HaveLen(N))
+
+		n, err := client.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(N)))
+	})
+
+	It("should BLPop", func() {
+		var received uint32
+		wg := performAsync(C, func(id int) {
+			for {
+				v, err := client.BLPop(3*time.Second, "list").Result()
+				if err != nil {
+					break
+				}
+				Expect(v).To(Equal([]string{"list", "hello"}))
+				atomic.AddUint32(&received, 1)
+			}
+		})
+
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				err := client.LPush("list", "hello").Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+
+		wg.Wait()
+		Expect(received).To(Equal(uint32(C * N)))
+	})
+
+	It("should WithContext", func() {
+		perform(C, func(_ int) {
+			err := client.WithContext(context.Background()).Ping().Err()
+			Expect(err).NotTo(HaveOccurred())
+		})
+	})
+})
+
+var _ = Describe("cluster races", func() {
+	var client *redis.ClusterClient
+	var C, N int
+
+	BeforeEach(func() {
+		opt := redisClusterOptions()
+		client = cluster.clusterClient(opt)
+
+		C, N = 10, 1000
+		if testing.Short() {
+			C = 4
+			N = 100
+		}
+	})
+
+	AfterEach(func() {
+		err := client.Close()
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	It("should echo", func() {
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				msg := fmt.Sprintf("echo %d %d", id, i)
+				echo, err := client.Echo(msg).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(echo).To(Equal(msg))
+			}
+		})
+	})
+
+	It("should get", func() {
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				key := fmt.Sprintf("key_%d_%d", id, i)
+				_, err := client.Get(key).Result()
+				Expect(err).To(Equal(redis.Nil))
+			}
+		})
+	})
+
+	It("should incr", func() {
+		key := "TestIncrFromGoroutines"
+
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				err := client.Incr(key).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+
+		val, err := client.Get(key).Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal(int64(C * N)))
+	})
+})
+
+func bigVal() []byte {
+	return bytes.Repeat([]byte{'*'}, 1<<17) // 128kb
+}
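
The race tests in this file rely on perform and performAsync helpers defined in main_test.go, which is not shown in this view. Purely as a sketch of the shape of such a fan-out helper (the real implementation may differ, and the sync import is assumed):

// perform runs fn in n goroutines, passing each goroutine its index,
// and blocks until all of them return.
func perform(n int, fn func(id int)) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fn(id)
		}(i)
	}
	wg.Wait()
}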

+ 742 - 0
redis.go

@@ -0,0 +1,742 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/go-redis/redis/v7/internal"
+	"github.com/go-redis/redis/v7/internal/pool"
+	"github.com/go-redis/redis/v7/internal/proto"
+)
+
+// Nil is the reply returned by Redis when a key does not exist.
+const Nil = proto.Nil
+
+func SetLogger(logger *log.Logger) {
+	internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+	BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
+	AfterProcess(ctx context.Context, cmd Cmder) error
+
+	BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
+	AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
+}
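
The Hook interface above is the public instrumentation point of the client. A minimal sketch of an external implementation, assuming the github.com/go-redis/redis/v7 import path used by the tests in this commit; the type name and log messages are illustrative only:

type loggingHook struct{}

func (loggingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
	return ctx, nil
}

func (loggingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
	log.Printf("command %q finished, err=%v", cmd.Name(), cmd.Err())
	return nil
}

func (loggingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
	return ctx, nil
}

func (loggingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
	log.Printf("pipeline of %d commands finished", len(cmds))
	return nil
}

// A hook is registered on a client with rdb.AddHook(loggingHook{}).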
+
+type hooks struct {
+	hooks []Hook
+}
+
+func (hs *hooks) Lock() {
+	hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
+}
+
+func (hs hooks) Clone() hooks {
+	clone := hs
+	clone.Lock()
+	return clone
+}
+
+func (hs *hooks) AddHook(hook Hook) {
+	hs.hooks = append(hs.hooks, hook)
+}
+
+func (hs hooks) process(
+	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
+) error {
+	ctx, err := hs.beforeProcess(ctx, cmd)
+	if err != nil {
+		cmd.SetErr(err)
+		return err
+	}
+
+	cmdErr := fn(ctx, cmd)
+
+	if err := hs.afterProcess(ctx, cmd); err != nil {
+		cmd.SetErr(err)
+		return err
+	}
+
+	return cmdErr
+}
+
+func (hs hooks) beforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
+	for _, h := range hs.hooks {
+		var err error
+		ctx, err = h.BeforeProcess(ctx, cmd)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ctx, nil
+}
+
+func (hs hooks) afterProcess(ctx context.Context, cmd Cmder) error {
+	var firstErr error
+	for _, h := range hs.hooks {
+		err := h.AfterProcess(ctx, cmd)
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return firstErr
+}
+
+func (hs hooks) processPipeline(
+	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+	ctx, err := hs.beforeProcessPipeline(ctx, cmds)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	cmdsErr := fn(ctx, cmds)
+
+	if err := hs.afterProcessPipeline(ctx, cmds); err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	return cmdsErr
+}
+
+func (hs hooks) beforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
+	for _, h := range hs.hooks {
+		var err error
+		ctx, err = h.BeforeProcessPipeline(ctx, cmds)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ctx, nil
+}
+
+func (hs hooks) afterProcessPipeline(ctx context.Context, cmds []Cmder) error {
+	var firstErr error
+	for _, h := range hs.hooks {
+		err := h.AfterProcessPipeline(ctx, cmds)
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+	opt      *Options
+	connPool pool.Pooler
+
+	onClose func() error // hook called when client is closed
+}
+
+func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
+	return &baseClient{
+		opt:      opt,
+		connPool: connPool,
+	}
+}
+
+func (c *baseClient) clone() *baseClient {
+	clone := *c
+	return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+	opt := c.opt.clone()
+	opt.ReadTimeout = timeout
+	opt.WriteTimeout = timeout
+
+	clone := c.clone()
+	clone.opt = opt
+
+	return clone
+}
+
+func (c *baseClient) String() string {
+	return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+	cn, err := c.connPool.NewConn(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = c.initConn(ctx, cn)
+	if err != nil {
+		_ = c.connPool.CloseConn(cn)
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+	if c.opt.Limiter != nil {
+		err := c.opt.Limiter.Allow()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	cn, err := c._getConn(ctx)
+	if err != nil {
+		if c.opt.Limiter != nil {
+			c.opt.Limiter.ReportResult(err)
+		}
+		return nil, err
+	}
+	return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+	cn, err := c.connPool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = c.initConn(ctx, cn)
+	if err != nil {
+		c.connPool.Remove(cn, err)
+		if err := internal.Unwrap(err); err != nil {
+			return nil, err
+		}
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+	if cn.Inited {
+		return nil
+	}
+	cn.Inited = true
+
+	if c.opt.Password == "" &&
+		c.opt.DB == 0 &&
+		!c.opt.readOnly &&
+		c.opt.OnConnect == nil {
+		return nil
+	}
+
+	connPool := pool.NewSingleConnPool(nil)
+	connPool.SetConn(cn)
+	conn := newConn(ctx, c.opt, connPool)
+
+	_, err := conn.Pipelined(func(pipe Pipeliner) error {
+		if c.opt.Password != "" {
+			pipe.Auth(c.opt.Password)
+		}
+
+		if c.opt.DB > 0 {
+			pipe.Select(c.opt.DB)
+		}
+
+		if c.opt.readOnly {
+			pipe.ReadOnly()
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if c.opt.OnConnect != nil {
+		return c.opt.OnConnect(conn)
+	}
+	return nil
+}
+
+func (c *baseClient) releaseConn(cn *pool.Conn, err error) {
+	if c.opt.Limiter != nil {
+		c.opt.Limiter.ReportResult(err)
+	}
+
+	if isBadConn(err, false) {
+		c.connPool.Remove(cn, err)
+	} else {
+		c.connPool.Put(cn)
+	}
+}
+
+func (c *baseClient) withConn(
+	ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+	cn, err := c.getConn(ctx)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		c.releaseConn(cn, err)
+	}()
+
+	err = fn(ctx, cn)
+	return err
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+	err := c._process(ctx, cmd)
+	if err != nil {
+		cmd.SetErr(err)
+		return err
+	}
+	return nil
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		retryTimeout := true
+		lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmd(wr, cmd)
+			})
+			if err != nil {
+				return err
+			}
+
+			err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+			if err != nil {
+				retryTimeout = cmd.readTimeout() == nil
+				return err
+			}
+
+			return nil
+		})
+		if lastErr == nil || !isRetryableError(lastErr, retryTimeout) {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+	if timeout := cmd.readTimeout(); timeout != nil {
+		t := *timeout
+		if t == 0 {
+			return 0
+		}
+		return t + 10*time.Second
+	}
+	return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+	var firstErr error
+	if c.onClose != nil {
+		if err := c.onClose(); err != nil {
+			firstErr = err
+		}
+	}
+	if err := c.connPool.Close(); err != nil && firstErr == nil {
+		firstErr = err
+	}
+	return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+	return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+	err := c._generalProcessPipeline(ctx, cmds, p)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+	return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) _generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		var canRetry bool
+		lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			var err error
+			canRetry, err = p(ctx, cn, cmds)
+			return err
+		})
+		if lastErr == nil || !canRetry || !isRetryableError(lastErr, true) {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+	ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmd(wr, cmds...)
+	})
+	if err != nil {
+		return true, err
+	}
+
+	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		return pipelineReadCmds(rd, cmds)
+	})
+	return true, err
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+	for _, cmd := range cmds {
+		err := cmd.readReply(rd)
+		if err != nil && !isRedisError(err) {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+	ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return txPipelineWriteMulti(wr, cmds)
+	})
+	if err != nil {
+		return true, err
+	}
+
+	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		err := txPipelineReadQueued(rd, cmds)
+		if err != nil {
+			return err
+		}
+		return pipelineReadCmds(rd, cmds)
+	})
+	return false, err
+}
+
+func txPipelineWriteMulti(wr *proto.Writer, cmds []Cmder) error {
+	multiExec := make([]Cmder, 0, len(cmds)+2)
+	multiExec = append(multiExec, NewStatusCmd("MULTI"))
+	multiExec = append(multiExec, cmds...)
+	multiExec = append(multiExec, NewSliceCmd("EXEC"))
+	return writeCmd(wr, multiExec...)
+}
+
+func txPipelineReadQueued(rd *proto.Reader, cmds []Cmder) error {
+	// Parse queued replies.
+	var statusCmd StatusCmd
+	err := statusCmd.readReply(rd)
+	if err != nil {
+		return err
+	}
+
+	for range cmds {
+		err = statusCmd.readReply(rd)
+		if err != nil && !isRedisError(err) {
+			return err
+		}
+	}
+
+	// Parse number of replies.
+	line, err := rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		err := fmt.Errorf("redis: expected '*', but got line %q", line)
+		return err
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+	*baseClient
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+	opt.init()
+
+	c := Client{
+		baseClient: newBaseClient(opt, newConnPool(opt)),
+		ctx:        context.Background(),
+	}
+	c.cmdable = c.Process
+
+	return &c
+}
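
A minimal caller-side sketch of the Client constructed above, which also shows the Nil sentinel in use; the address, key and fmt import are assumptions:

rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
defer rdb.Close()

if err := rdb.Set("key", "value", 0).Err(); err != nil {
	panic(err)
}

val, err := rdb.Get("key").Result()
switch {
case err == redis.Nil:
	fmt.Println("key does not exist")
case err != nil:
	panic(err)
default:
	fmt.Println("key =", val)
}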
+
+func (c *Client) clone() *Client {
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.Lock()
+	return &clone
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+	clone := c.clone()
+	clone.baseClient = c.baseClient.withTimeout(timeout)
+	return clone
+}
+
+func (c *Client) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := c.clone()
+	clone.ctx = ctx
+	return clone
+}
+
+func (c *Client) Conn() *Conn {
+	return newConn(c.ctx, c.opt, pool.NewSingleConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(args ...interface{}) *Cmd {
+	return c.DoContext(c.ctx, args...)
+}
+
+func (c *Client) DoContext(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(args...)
+	_ = c.ProcessContext(ctx, cmd)
+	return cmd
+}
+
+func (c *Client) Process(cmd Cmder) error {
+	return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Client) ProcessContext(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+	return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+	stats := c.connPool.Stats()
+	return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
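
The MULTI/EXEC wrapping performed by txPipelineWriteMulti above is normally driven through TxPipelined; a short sketch, assuming a *redis.Client named rdb and the usual fmt and time imports:

cmds, err := rdb.TxPipelined(func(pipe redis.Pipeliner) error {
	pipe.Incr("counter")
	pipe.Expire("counter", time.Hour)
	return nil
})
if err != nil {
	panic(err)
}

// Both commands were queued and executed inside a single MULTI/EXEC block.
incr := cmds[0].(*redis.IntCmd)
fmt.Println("counter:", incr.Val())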
+
+func (c *Client) pubSub() *PubSub {
+	pubsub := &PubSub{
+		opt: c.opt,
+
+		newConn: func(channels []string) (*pool.Conn, error) {
+			return c.newConn(context.TODO())
+		},
+		closeConn: c.connPool.CloseConn,
+	}
+	pubsub.init()
+	return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+//    sub := client.Subscribe(queryResp)
+//    iface, err := sub.Receive()
+//    if err != nil {
+//        // handle error
+//    }
+//
+//    // Should be *Subscription, but others are possible if other actions have been
+//    // taken on sub since it was created.
+//    switch iface.(type) {
+//    case *Subscription:
+//        // subscribe succeeded
+//    case *Message:
+//        // received first message
+//    case *Pong:
+//        // pong received
+//    default:
+//        // handle error
+//    }
+//
+//    ch := sub.Channel()
+func (c *Client) Subscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(channels...)
+	}
+	return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+	baseClient
+	cmdable
+	statefulCmdable
+}
+
+// Conn is like Client, but its pool contains a single connection.
+type Conn struct {
+	*conn
+	ctx context.Context
+}
+
+func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
+	c := Conn{
+		conn: &conn{
+			baseClient: baseClient{
+				opt:      opt,
+				connPool: connPool,
+			},
+		},
+		ctx: ctx,
+	}
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+	return &c
+}
+
+func (c *Conn) Process(cmd Cmder) error {
+	return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Conn) ProcessContext(ctx context.Context, cmd Cmder) error {
+	return c.baseClient.process(ctx, cmd)
+}
+
+func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}

+ 391 - 0
redis_test.go

@@ -0,0 +1,391 @@
+package redis_test
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+type redisHookError struct {
+	redis.Hook
+}
+
+var _ redis.Hook = redisHookError{}
+
+func (redisHookError) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+	return ctx, nil
+}
+
+func (redisHookError) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
+	return errors.New("hook error")
+}
+
+func TestHookError(t *testing.T) {
+	rdb := redis.NewClient(&redis.Options{
+		Addr: ":6379",
+	})
+	rdb.AddHook(redisHookError{})
+
+	err := rdb.Ping().Err()
+	if err == nil {
+		t.Fatalf("got nil, expected an error")
+	}
+
+	wanted := "hook error"
+	if err.Error() != wanted {
+		t.Fatalf(`got %q, wanted %q`, err, wanted)
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ = Describe("Client", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		client = redis.NewClient(redisOptions())
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		client.Close()
+	})
+
+	It("should Stringer", func() {
+		Expect(client.String()).To(Equal("Redis<:6380 db:15>"))
+	})
+
+	It("supports WithContext", func() {
+		c, cancel := context.WithCancel(context.Background())
+		cancel()
+
+		err := client.WithContext(c).Ping().Err()
+		Expect(err).To(MatchError("context canceled"))
+	})
+
+	It("supports WithTimeout", func() {
+		err := client.ClientPause(time.Second).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		err = client.WithTimeout(10 * time.Millisecond).Ping().Err()
+		Expect(err).To(HaveOccurred())
+
+		err = client.Ping().Err()
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	It("should ping", func() {
+		val, err := client.Ping().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal("PONG"))
+	})
+
+	It("should return pool stats", func() {
+		Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
+	})
+
+	It("should support custom dialers", func() {
+		custom := redis.NewClient(&redis.Options{
+			Network: "tcp",
+			Addr:    redisAddr,
+			Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
+				var d net.Dialer
+				return d.DialContext(ctx, network, addr)
+			},
+		})
+
+		val, err := custom.Ping().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal("PONG"))
+		Expect(custom.Close()).NotTo(HaveOccurred())
+	})
+
+	It("should close", func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+		err := client.Ping().Err()
+		Expect(err).To(MatchError("redis: client is closed"))
+	})
+
+	It("should close pubsub without closing the client", func() {
+		pubsub := client.Subscribe()
+		Expect(pubsub.Close()).NotTo(HaveOccurred())
+
+		_, err := pubsub.Receive()
+		Expect(err).To(MatchError("redis: client is closed"))
+		Expect(client.Ping().Err()).NotTo(HaveOccurred())
+	})
+
+	It("should close Tx without closing the client", func() {
+		err := client.Watch(func(tx *redis.Tx) error {
+			_, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+				pipe.Ping()
+				return nil
+			})
+			return err
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(client.Ping().Err()).NotTo(HaveOccurred())
+	})
+
+	It("should close pipeline without closing the client", func() {
+		pipeline := client.Pipeline()
+		Expect(pipeline.Close()).NotTo(HaveOccurred())
+
+		pipeline.Ping()
+		_, err := pipeline.Exec()
+		Expect(err).To(MatchError("redis: client is closed"))
+
+		Expect(client.Ping().Err()).NotTo(HaveOccurred())
+	})
+
+	It("should close pubsub when client is closed", func() {
+		pubsub := client.Subscribe()
+		Expect(client.Close()).NotTo(HaveOccurred())
+
+		_, err := pubsub.Receive()
+		Expect(err).To(MatchError("redis: client is closed"))
+
+		Expect(pubsub.Close()).NotTo(HaveOccurred())
+	})
+
+	It("should close pipeline when client is closed", func() {
+		pipeline := client.Pipeline()
+		Expect(client.Close()).NotTo(HaveOccurred())
+		Expect(pipeline.Close()).NotTo(HaveOccurred())
+	})
+
+	It("should select DB", func() {
+		db2 := redis.NewClient(&redis.Options{
+			Addr: redisAddr,
+			DB:   2,
+		})
+		Expect(db2.FlushDB().Err()).NotTo(HaveOccurred())
+		Expect(db2.Get("db").Err()).To(Equal(redis.Nil))
+		Expect(db2.Set("db", 2, 0).Err()).NotTo(HaveOccurred())
+
+		n, err := db2.Get("db").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(2)))
+
+		Expect(client.Get("db").Err()).To(Equal(redis.Nil))
+
+		Expect(db2.FlushDB().Err()).NotTo(HaveOccurred())
+		Expect(db2.Close()).NotTo(HaveOccurred())
+	})
+
+	It("processes custom commands", func() {
+		cmd := redis.NewCmd("PING")
+		_ = client.Process(cmd)
+
+		// Flush buffers.
+		Expect(client.Echo("hello").Err()).NotTo(HaveOccurred())
+
+		Expect(cmd.Err()).NotTo(HaveOccurred())
+		Expect(cmd.Val()).To(Equal("PONG"))
+	})
+
+	It("should retry command on network error", func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+
+		client = redis.NewClient(&redis.Options{
+			Addr:       redisAddr,
+			MaxRetries: 1,
+		})
+
+		// Put bad connection in the pool.
+		cn, err := client.Pool().Get(context.Background())
+		Expect(err).NotTo(HaveOccurred())
+
+		cn.SetNetConn(&badConn{})
+		client.Pool().Put(cn)
+
+		err = client.Ping().Err()
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	It("should retry with backoff", func() {
+		clientNoRetry := redis.NewClient(&redis.Options{
+			Addr:       ":1234",
+			MaxRetries: 0,
+		})
+		defer clientNoRetry.Close()
+
+		clientRetry := redis.NewClient(&redis.Options{
+			Addr:            ":1234",
+			MaxRetries:      5,
+			MaxRetryBackoff: 128 * time.Millisecond,
+		})
+		defer clientRetry.Close()
+
+		startNoRetry := time.Now()
+		err := clientNoRetry.Ping().Err()
+		Expect(err).To(HaveOccurred())
+		elapseNoRetry := time.Since(startNoRetry)
+
+		startRetry := time.Now()
+		err = clientRetry.Ping().Err()
+		Expect(err).To(HaveOccurred())
+		elapseRetry := time.Since(startRetry)
+
+		Expect(elapseRetry).To(BeNumerically(">", elapseNoRetry, 10*time.Millisecond))
+	})
+
+	It("should update conn.UsedAt on read/write", func() {
+		cn, err := client.Pool().Get(context.Background())
+		Expect(err).NotTo(HaveOccurred())
+		Expect(cn.UsedAt()).NotTo(BeZero())
+		createdAt := cn.UsedAt()
+
+		client.Pool().Put(cn)
+		Expect(cn.UsedAt().Equal(createdAt)).To(BeTrue())
+
+		time.Sleep(time.Second)
+
+		err = client.Ping().Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		cn, err = client.Pool().Get(context.Background())
+		Expect(err).NotTo(HaveOccurred())
+		Expect(cn).NotTo(BeNil())
+		Expect(cn.UsedAt().After(createdAt)).To(BeTrue())
+	})
+
+	It("should process command with special chars", func() {
+		set := client.Set("key", "hello1\r\nhello2\r\n", 0)
+		Expect(set.Err()).NotTo(HaveOccurred())
+		Expect(set.Val()).To(Equal("OK"))
+
+		get := client.Get("key")
+		Expect(get.Err()).NotTo(HaveOccurred())
+		Expect(get.Val()).To(Equal("hello1\r\nhello2\r\n"))
+	})
+
+	It("should handle big vals", func() {
+		bigVal := bytes.Repeat([]byte{'*'}, 2e6)
+
+		err := client.Set("key", bigVal, 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		// Reconnect to get new connection.
+		Expect(client.Close()).NotTo(HaveOccurred())
+		client = redis.NewClient(redisOptions())
+
+		got, err := client.Get("key").Bytes()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(got).To(Equal(bigVal))
+	})
+})
+
+var _ = Describe("Client timeout", func() {
+	var opt *redis.Options
+	var client *redis.Client
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	testTimeout := func() {
+		It("Ping timeouts", func() {
+			err := client.Ping().Err()
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Pipeline timeouts", func() {
+			_, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+				pipe.Ping()
+				return nil
+			})
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Subscribe timeouts", func() {
+			if opt.WriteTimeout == 0 {
+				return
+			}
+
+			pubsub := client.Subscribe()
+			defer pubsub.Close()
+
+			err := pubsub.Subscribe("_")
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Tx timeouts", func() {
+			err := client.Watch(func(tx *redis.Tx) error {
+				return tx.Ping().Err()
+			})
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Tx Pipeline timeouts", func() {
+			err := client.Watch(func(tx *redis.Tx) error {
+				_, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+					pipe.Ping()
+					return nil
+				})
+				return err
+			})
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+	}
+
+	Context("read timeout", func() {
+		BeforeEach(func() {
+			opt = redisOptions()
+			opt.ReadTimeout = time.Nanosecond
+			opt.WriteTimeout = -1
+			client = redis.NewClient(opt)
+		})
+
+		testTimeout()
+	})
+
+	Context("write timeout", func() {
+		BeforeEach(func() {
+			opt = redisOptions()
+			opt.ReadTimeout = -1
+			opt.WriteTimeout = time.Nanosecond
+			client = redis.NewClient(opt)
+		})
+
+		testTimeout()
+	})
+})
+
+var _ = Describe("Client OnConnect", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		opt := redisOptions()
+		opt.DB = 0
+		opt.OnConnect = func(cn *redis.Conn) error {
+			return cn.ClientSetName("on_connect").Err()
+		}
+
+		client = redis.NewClient(opt)
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("calls OnConnect", func() {
+		name, err := client.ClientGetName().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(name).To(Equal("on_connect"))
+	})
+})

+ 172 - 0
result.go

@@ -0,0 +1,172 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing
+func NewCmdResult(val interface{}, err error) *Cmd {
+	var cmd Cmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+	var cmd SliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing
+func NewStatusResult(val string, err error) *StatusCmd {
+	var cmd StatusCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing
+func NewIntResult(val int64, err error) *IntCmd {
+	var cmd IntCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+	var cmd DurationCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing
+func NewBoolResult(val bool, err error) *BoolCmd {
+	var cmd BoolCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing
+func NewStringResult(val string, err error) *StringCmd {
+	var cmd StringCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing
+func NewFloatResult(val float64, err error) *FloatCmd {
+	var cmd FloatCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+	var cmd StringSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+	var cmd BoolSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+	var cmd StringStringMapCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+	var cmd StringIntMapCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+	var cmd ZSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+	var cmd ZWithKeyCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+	var cmd ScanCmd
+	cmd.page = keys
+	cmd.cursor = cursor
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+	var cmd ClusterSlotsCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+	var cmd GeoLocationCmd
+	cmd.locations = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+	var cmd GeoPosCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+	var cmd CommandsInfoCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+	var cmd XMessageSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+	var cmd XStreamSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
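
The constructors in result.go above let tests fabricate command results without a running Redis server. A minimal sketch of combining them with a hand-written stub; the Pinger interface and stub type are assumptions for illustration, not part of the package:

package myapp_test

import (
	"testing"

	"github.com/go-redis/redis/v7"
)

// Pinger is the narrow interface the code under test depends on.
type Pinger interface {
	Ping() *redis.StatusCmd
}

type stubPinger struct{}

func (stubPinger) Ping() *redis.StatusCmd {
	return redis.NewStatusResult("PONG", nil)
}

func TestHealthCheck(t *testing.T) {
	var p Pinger = stubPinger{}
	if got := p.Ping().Val(); got != "PONG" {
		t.Fatalf("got %q, want %q", got, "PONG")
	}
}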

+ 722 - 0
ring.go

@@ -0,0 +1,722 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/rand"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v7/internal"
+	"github.com/go-redis/redis/v7/internal/consistenthash"
+	"github.com/go-redis/redis/v7/internal/hashtag"
+	"github.com/go-redis/redis/v7/internal/pool"
+)
+
+// Hash is the type of the hash function used in consistent hashing.
+type Hash consistenthash.Hash
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+	// Map of name => host:port addresses of ring shards.
+	Addrs map[string]string
+
+	// Map of name => password of ring shards, to allow different shards to have
+	// different passwords. It will be ignored if the Password field is set.
+	Passwords map[string]string
+
+	// Frequency of PING commands sent to check shard availability.
+	// A shard is considered down after 3 consecutive failed checks.
+	HeartbeatFrequency time.Duration
+
+	// Hash function used in consistent hash.
+	// Default is crc32.ChecksumIEEE.
+	Hash Hash
+
+	// Number of replicas in consistent hash.
+	// Default is 100 replicas.
+	//
+	// A higher number of replicas gives less deviation, that is, keys are
+	// distributed across nodes more evenly.
+	//
+	//  The deviation for common replica counts is:
+	//  --------------------------------------------------------
+	//  | nreplicas | standard error | 99% confidence interval |
+	//  |     10    |     0.3152     |      (0.37, 1.98)       |
+	//  |    100    |     0.0997     |      (0.76, 1.28)       |
+	//  |   1000    |     0.0316     |      (0.92, 1.09)       |
+	//  --------------------------------------------------------
+	//
+	//  See https://arxiv.org/abs/1406.2294 for reference
+	HashReplicas int
+
+	// Optional hook that is called when a new shard is created.
+	OnNewShard func(*Client)
+
+	// The following options are copied from the Options struct.
+
+	OnConnect func(*Conn) error
+
+	DB       int
+	Password string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+func (opt *RingOptions) init() {
+	if opt.HeartbeatFrequency == 0 {
+		opt.HeartbeatFrequency = 500 * time.Millisecond
+	}
+
+	if opt.HashReplicas == 0 {
+		opt.HashReplicas = 100
+	}
+
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+}
+
+func (opt *RingOptions) clientOptions(shard string) *Options {
+	return &Options{
+		OnConnect: opt.OnConnect,
+
+		DB:       opt.DB,
+		Password: opt.getPassword(shard),
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+	}
+}
+
+func (opt *RingOptions) getPassword(shard string) string {
+	if opt.Password == "" {
+		return opt.Passwords[shard]
+	}
+	return opt.Password
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+	Client *Client
+	down   int32
+}
+
+func (shard *ringShard) String() string {
+	var state string
+	if shard.IsUp() {
+		state = "up"
+	} else {
+		state = "down"
+	}
+	return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+	const threshold = 3
+	return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+	return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+	if up {
+		changed := shard.IsDown()
+		atomic.StoreInt32(&shard.down, 0)
+		return changed
+	}
+
+	if shard.IsDown() {
+		return false
+	}
+
+	atomic.AddInt32(&shard.down, 1)
+	return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringShards struct {
+	opt *RingOptions
+
+	mu     sync.RWMutex
+	hash   *consistenthash.Map
+	shards map[string]*ringShard // read only
+	list   []*ringShard          // read only
+	len    int
+	closed bool
+}
+
+func newRingShards(opt *RingOptions) *ringShards {
+	return &ringShards{
+		opt: opt,
+
+		hash:   newConsistentHash(opt),
+		shards: make(map[string]*ringShard),
+	}
+}
+
+func (c *ringShards) Add(name string, cl *Client) {
+	shard := &ringShard{Client: cl}
+	c.hash.Add(name)
+	c.shards[name] = shard
+	c.list = append(c.list, shard)
+}
+
+func (c *ringShards) List() []*ringShard {
+	c.mu.RLock()
+	list := c.list
+	c.mu.RUnlock()
+	return list
+}
+
+func (c *ringShards) Hash(key string) string {
+	c.mu.RLock()
+	hash := c.hash.Get(key)
+	c.mu.RUnlock()
+	return hash
+}
+
+func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+	key = hashtag.Key(key)
+
+	c.mu.RLock()
+
+	if c.closed {
+		c.mu.RUnlock()
+		return nil, pool.ErrClosed
+	}
+
+	hash := c.hash.Get(key)
+	if hash == "" {
+		c.mu.RUnlock()
+		return nil, errRingShardsDown
+	}
+
+	shard := c.shards[hash]
+	c.mu.RUnlock()
+
+	return shard, nil
+}
+
+func (c *ringShards) GetByHash(name string) (*ringShard, error) {
+	if name == "" {
+		return c.Random()
+	}
+
+	c.mu.RLock()
+	shard := c.shards[name]
+	c.mu.RUnlock()
+	return shard, nil
+}
+
+func (c *ringShards) Random() (*ringShard, error) {
+	return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringShards) Heartbeat(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+	for range ticker.C {
+		var rebalance bool
+
+		c.mu.RLock()
+
+		if c.closed {
+			c.mu.RUnlock()
+			break
+		}
+
+		shards := c.list
+		c.mu.RUnlock()
+
+		for _, shard := range shards {
+			err := shard.Client.Ping().Err()
+			if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
+				internal.Logger.Printf("ring shard state changed: %s", shard)
+				rebalance = true
+			}
+		}
+
+		if rebalance {
+			c.rebalance()
+		}
+	}
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *ringShards) rebalance() {
+	c.mu.RLock()
+	shards := c.shards
+	c.mu.RUnlock()
+
+	hash := newConsistentHash(c.opt)
+	var shardsNum int
+	for name, shard := range shards {
+		if shard.IsUp() {
+			hash.Add(name)
+			shardsNum++
+		}
+	}
+
+	c.mu.Lock()
+	c.hash = hash
+	c.len = shardsNum
+	c.mu.Unlock()
+}
+
+func (c *ringShards) Len() int {
+	c.mu.RLock()
+	l := c.len
+	c.mu.RUnlock()
+	return l
+}
+
+func (c *ringShards) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, shard := range c.shards {
+		if err := shard.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	c.hash = nil
+	c.shards = nil
+	c.list = nil
+
+	return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type ring struct {
+	opt           *RingOptions
+	shards        *ringShards
+	cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses the shards that are available to it and does no coordination
+// when shard state changes.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+	*ring
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+func NewRing(opt *RingOptions) *Ring {
+	opt.init()
+
+	ring := Ring{
+		ring: &ring{
+			opt:    opt,
+			shards: newRingShards(opt),
+		},
+		ctx: context.Background(),
+	}
+	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+	ring.cmdable = ring.Process
+
+	for name, addr := range opt.Addrs {
+		shard := newRingShard(opt, name, addr)
+		ring.shards.Add(name, shard)
+	}
+
+	go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+
+	return &ring
+}
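
A minimal caller-side sketch of building a Ring from the options documented above; the shard names, addresses and tuning values are assumptions:

ring := redis.NewRing(&redis.RingOptions{
	Addrs: map[string]string{
		"shard1": "localhost:7000",
		"shard2": "localhost:7001",
	},
	HeartbeatFrequency: 500 * time.Millisecond,
	HashReplicas:       100,
})
defer ring.Close()

// Keys are routed to shards via consistent hashing on the key's hash tag.
if err := ring.Set("key", "value", 0).Err(); err != nil {
	panic(err)
}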
+
+func newRingShard(opt *RingOptions, name, addr string) *Client {
+	clopt := opt.clientOptions(name)
+	clopt.Addr = addr
+	shard := NewClient(clopt)
+	if opt.OnNewShard != nil {
+		opt.OnNewShard(shard)
+	}
+	return shard
+}
+
+func (c *Ring) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Ring) WithContext(ctx context.Context) *Ring {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.Lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(args ...interface{}) *Cmd {
+	return c.DoContext(c.ctx, args...)
+}
+
+func (c *Ring) DoContext(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(args...)
+	_ = c.ProcessContext(ctx, cmd)
+	return cmd
+}
+
+func (c *Ring) Process(cmd Cmder) error {
+	return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Ring) ProcessContext(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.process)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+	return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+	shards := c.shards.List()
+	var acc PoolStats
+	for _, shard := range shards {
+		s := shard.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+	}
+	return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+	return c.shards.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(channels ...string) *PubSub {
+	if len(channels) == 0 {
+		panic("at least one channel is required")
+	}
+
+	shard, err := c.shards.GetByKey(channels[0])
+	if err != nil {
+		//TODO: return PubSub with sticky error
+		panic(err)
+	}
+	return shard.Client.Subscribe(channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(channels ...string) *PubSub {
+	if len(channels) == 0 {
+		panic("at least one channel is required")
+	}
+
+	shard, err := c.shards.GetByKey(channels[0])
+	if err != nil {
+		//TODO: return PubSub with sticky error
+		panic(err)
+	}
+	return shard.Client.PSubscribe(channels...)
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(fn func(client *Client) error) error {
+	shards := c.shards.List()
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	for _, shard := range shards {
+		if shard.IsDown() {
+			continue
+		}
+
+		wg.Add(1)
+		go func(shard *ringShard) {
+			defer wg.Done()
+			err := fn(shard.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(shard)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) {
+	shards := c.shards.List()
+	var firstErr error
+	for _, shard := range shards {
+		cmdsInfo, err := shard.Client.Command().Result()
+		if err == nil {
+			return cmdsInfo, nil
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+	if firstErr == nil {
+		firstErr = errRingShardsDown
+	}
+	return nil, firstErr
+}
+
+func (c *Ring) cmdInfo(name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get()
+	if err != nil {
+		return nil
+	}
+	info := cmdsInfo[name]
+	if info == nil {
+		internal.Logger.Printf("info for cmd=%s not found", name)
+	}
+	return info
+}
+
+func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
+	cmdInfo := c.cmdInfo(cmd.Name())
+	pos := cmdFirstKeyPos(cmd, cmdInfo)
+	if pos == 0 {
+		return c.shards.Random()
+	}
+	firstKey := cmd.stringArg(pos)
+	return c.shards.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+	err := c._process(ctx, cmd)
+	if err != nil {
+		cmd.SetErr(err)
+		return err
+	}
+	return nil
+}
+
+func (c *Ring) _process(ctx context.Context, cmd Cmder) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		shard, err := c.cmdShard(cmd)
+		if err != nil {
+			return err
+		}
+
+		lastErr = shard.Client._process(ctx, cmd)
+		if lastErr == nil || !isRetryableError(lastErr, cmd.readTimeout() == nil) {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return c.generalProcessPipeline(ctx, cmds, false)
+	})
+}
+
+func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return c.generalProcessPipeline(ctx, cmds, true)
+	})
+}
+
+func (c *Ring) generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, tx bool,
+) error {
+	cmdsMap := make(map[string][]Cmder)
+	for _, cmd := range cmds {
+		cmdInfo := c.cmdInfo(cmd.Name())
+		hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+		if hash != "" {
+			hash = c.shards.Hash(hashtag.Key(hash))
+		}
+		cmdsMap[hash] = append(cmdsMap[hash], cmd)
+	}
+
+	var wg sync.WaitGroup
+	for hash, cmds := range cmdsMap {
+		wg.Add(1)
+		go func(hash string, cmds []Cmder) {
+			defer wg.Done()
+
+			err := c.processShardPipeline(ctx, hash, cmds, tx)
+			if err != nil {
+				setCmdsErr(cmds, err)
+			}
+		}(hash, cmds)
+	}
+
+	wg.Wait()
+	return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) processShardPipeline(
+	ctx context.Context, hash string, cmds []Cmder, tx bool,
+) error {
+	//TODO: retry?
+	shard, err := c.shards.GetByHash(hash)
+	if err != nil {
+		return err
+	}
+
+	if tx {
+		err = shard.Client._generalProcessPipeline(
+			ctx, cmds, shard.Client.txPipelineProcessCmds)
+	} else {
+		err = shard.Client._generalProcessPipeline(
+			ctx, cmds, shard.Client.pipelineProcessCmds)
+	}
+	return err
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+	return c.shards.Close()
+}
+
+func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
+	if len(keys) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one key")
+	}
+
+	var shards []*ringShard
+	for _, key := range keys {
+		if key != "" {
+			shard, err := c.shards.GetByKey(hashtag.Key(key))
+			if err != nil {
+				return err
+			}
+
+			shards = append(shards, shard)
+		}
+	}
+
+	if len(shards) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one shard")
+	}
+
+	if len(shards) > 1 {
+		for _, shard := range shards[1:] {
+			if shard.Client != shards[0].Client {
+				err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+				return err
+			}
+		}
+	}
+
+	return shards[0].Client.Watch(fn, keys...)
+}
+
+func newConsistentHash(opt *RingOptions) *consistenthash.Map {
+	return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash))
+}
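
The Ring above routes each command to a shard chosen by consistent hashing of the command's first key, retries with backoff, and splits pipelines per shard before executing them concurrently. A minimal usage sketch, assuming two local Redis instances at the addresses shown (shard names and keys are illustrative, not part of this commit):

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7"
)

func main() {
	// Two named shards; keys are distributed between them by consistent hashing.
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:6379",
			"shard2": "localhost:6380",
		},
	})
	defer ring.Close()

	// Keys that share a hash tag ({user:42}) map to the same shard, which is
	// what Ring.Watch requires for multi-key transactions.
	if err := ring.Set("{user:42}:name", "alice", 0).Err(); err != nil {
		panic(err)
	}

	// Pipelined commands are grouped per shard and sent concurrently.
	_, err := ring.Pipelined(func(pipe redis.Pipeliner) error {
		pipe.Incr("{user:42}:visits")
		pipe.Expire("{user:42}:visits", time.Hour)
		return nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(ring.Get("{user:42}:name").Val())
}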

+ 485 - 0
ring_test.go

@@ -0,0 +1,485 @@
+package redis_test
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Redis Ring", func() {
+	const heartbeat = 100 * time.Millisecond
+
+	var ring *redis.Ring
+
+	setRingKeys := func() {
+		for i := 0; i < 100; i++ {
+			err := ring.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+		}
+	}
+
+	BeforeEach(func() {
+		opt := redisRingOptions()
+		opt.HeartbeatFrequency = heartbeat
+		ring = redis.NewRing(opt)
+
+		err := ring.ForEachShard(func(cl *redis.Client) error {
+			return cl.FlushDB().Err()
+		})
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(ring.Close()).NotTo(HaveOccurred())
+	})
+
+	It("supports WithContext", func() {
+		c, cancel := context.WithCancel(context.Background())
+		cancel()
+
+		err := ring.WithContext(c).Ping().Err()
+		Expect(err).To(MatchError("context canceled"))
+	})
+
+	It("distributes keys", func() {
+		setRingKeys()
+
+		// Both shards should have some keys now.
+		Expect(ringShard1.Info("keyspace").Val()).To(ContainSubstring("keys=57"))
+		Expect(ringShard2.Info("keyspace").Val()).To(ContainSubstring("keys=43"))
+	})
+
+	It("distributes keys when using EVAL", func() {
+		script := redis.NewScript(`
+			local r = redis.call('SET', KEYS[1], ARGV[1])
+			return r
+		`)
+
+		var key string
+		for i := 0; i < 100; i++ {
+			key = fmt.Sprintf("key%d", i)
+			err := script.Run(ring, []string{key}, "value").Err()
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		Expect(ringShard1.Info("keyspace").Val()).To(ContainSubstring("keys=57"))
+		Expect(ringShard2.Info("keyspace").Val()).To(ContainSubstring("keys=43"))
+	})
+
+	It("uses single shard when one of the shards is down", func() {
+		// Stop ringShard2.
+		Expect(ringShard2.Close()).NotTo(HaveOccurred())
+
+		Eventually(func() int {
+			return ring.Len()
+		}, "30s").Should(Equal(1))
+
+		setRingKeys()
+
+		// RingShard1 should have all keys.
+		Expect(ringShard1.Info("keyspace").Val()).To(ContainSubstring("keys=100"))
+
+		// Start ringShard2.
+		var err error
+		ringShard2, err = startRedis(ringShard2Port)
+		Expect(err).NotTo(HaveOccurred())
+
+		Eventually(func() int {
+			return ring.Len()
+		}, "30s").Should(Equal(2))
+
+		setRingKeys()
+
+		// RingShard2 should have its keys.
+		Expect(ringShard2.Info("keyspace").Val()).To(ContainSubstring("keys=43"))
+	})
+
+	It("supports hash tags", func() {
+		for i := 0; i < 100; i++ {
+			err := ring.Set(fmt.Sprintf("key%d{tag}", i), "value", 0).Err()
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		Expect(ringShard1.Info("keyspace").Val()).ToNot(ContainSubstring("keys="))
+		Expect(ringShard2.Info("keyspace").Val()).To(ContainSubstring("keys=100"))
+	})
+
+	Describe("pipeline", func() {
+		It("distributes keys", func() {
+			pipe := ring.Pipeline()
+			for i := 0; i < 100; i++ {
+				err := pipe.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+				Expect(err).NotTo(HaveOccurred())
+			}
+			cmds, err := pipe.Exec()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(HaveLen(100))
+			Expect(pipe.Close()).NotTo(HaveOccurred())
+
+			for _, cmd := range cmds {
+				Expect(cmd.Err()).NotTo(HaveOccurred())
+				Expect(cmd.(*redis.StatusCmd).Val()).To(Equal("OK"))
+			}
+
+			// Both shards should have some keys now.
+			Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=57"))
+			Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43"))
+		})
+
+		It("is consistent with ring", func() {
+			var keys []string
+			for i := 0; i < 100; i++ {
+				key := make([]byte, 64)
+				_, err := rand.Read(key)
+				Expect(err).NotTo(HaveOccurred())
+				keys = append(keys, string(key))
+			}
+
+			_, err := ring.Pipelined(func(pipe redis.Pipeliner) error {
+				for _, key := range keys {
+					pipe.Set(key, "value", 0).Err()
+				}
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			for _, key := range keys {
+				val, err := ring.Get(key).Result()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(val).To(Equal("value"))
+			}
+		})
+
+		It("supports hash tags", func() {
+			_, err := ring.Pipelined(func(pipe redis.Pipeliner) error {
+				for i := 0; i < 100; i++ {
+					pipe.Set(fmt.Sprintf("key%d{tag}", i), "value", 0).Err()
+				}
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(ringShard1.Info().Val()).ToNot(ContainSubstring("keys="))
+			Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=100"))
+		})
+	})
+
+	Describe("shard passwords", func() {
+		It("can be initialized with a single password, used for all shards", func() {
+			opts := redisRingOptions()
+			opts.Password = "password"
+			ring = redis.NewRing(opts)
+
+			err := ring.Ping().Err()
+			Expect(err).To(MatchError("ERR Client sent AUTH, but no password is set"))
+		})
+
+		It("can be initialized with a passwords map, one for each shard", func() {
+			opts := redisRingOptions()
+			opts.Passwords = map[string]string{
+				"ringShardOne": "password1",
+				"ringShardTwo": "password2",
+			}
+			ring = redis.NewRing(opts)
+
+			err := ring.Ping().Err()
+			Expect(err).To(MatchError("ERR Client sent AUTH, but no password is set"))
+		})
+	})
+})
+
+var _ = Describe("empty Redis Ring", func() {
+	var ring *redis.Ring
+
+	BeforeEach(func() {
+		ring = redis.NewRing(&redis.RingOptions{})
+	})
+
+	AfterEach(func() {
+		Expect(ring.Close()).NotTo(HaveOccurred())
+	})
+
+	It("returns an error", func() {
+		err := ring.Ping().Err()
+		Expect(err).To(MatchError("redis: all ring shards are down"))
+	})
+
+	It("pipeline returns an error", func() {
+		_, err := ring.Pipelined(func(pipe redis.Pipeliner) error {
+			pipe.Ping()
+			return nil
+		})
+		Expect(err).To(MatchError("redis: all ring shards are down"))
+	})
+})
+
+var _ = Describe("Ring watch", func() {
+	const heartbeat = 100 * time.Millisecond
+
+	var ring *redis.Ring
+
+	BeforeEach(func() {
+		opt := redisRingOptions()
+		opt.HeartbeatFrequency = heartbeat
+		ring = redis.NewRing(opt)
+
+		err := ring.ForEachShard(func(cl *redis.Client) error {
+			return cl.FlushDB().Err()
+		})
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(ring.Close()).NotTo(HaveOccurred())
+	})
+
+	It("should Watch", func() {
+		var incr func(string) error
+
+		// Transactionally increments key using GET and SET commands.
+		incr = func(key string) error {
+			err := ring.Watch(func(tx *redis.Tx) error {
+				n, err := tx.Get(key).Int64()
+				if err != nil && err != redis.Nil {
+					return err
+				}
+
+				_, err = tx.TxPipelined(func(pipe redis.Pipeliner) error {
+					pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
+					return nil
+				})
+				return err
+			}, key)
+			if err == redis.TxFailedErr {
+				return incr(key)
+			}
+			return err
+		}
+
+		var wg sync.WaitGroup
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+			go func() {
+				defer GinkgoRecover()
+				defer wg.Done()
+
+				err := incr("key")
+				Expect(err).NotTo(HaveOccurred())
+			}()
+		}
+		wg.Wait()
+
+		n, err := ring.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(100)))
+	})
+
+	It("should discard", func() {
+		err := ring.Watch(func(tx *redis.Tx) error {
+			cmds, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+				pipe.Set("key1", "hello1", 0)
+				pipe.Discard()
+				pipe.Set("key2", "hello2", 0)
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(HaveLen(1))
+			return err
+		}, "key1", "key2")
+		Expect(err).NotTo(HaveOccurred())
+
+		get := ring.Get("key1")
+		Expect(get.Err()).To(Equal(redis.Nil))
+		Expect(get.Val()).To(Equal(""))
+
+		get = ring.Get("key2")
+		Expect(get.Err()).NotTo(HaveOccurred())
+		Expect(get.Val()).To(Equal("hello2"))
+	})
+
+	It("returns no error when there are no commands", func() {
+		err := ring.Watch(func(tx *redis.Tx) error {
+			_, err := tx.TxPipelined(func(redis.Pipeliner) error { return nil })
+			return err
+		}, "key")
+		Expect(err).NotTo(HaveOccurred())
+
+		v, err := ring.Ping().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(v).To(Equal("PONG"))
+	})
+
+	It("should exec bulks", func() {
+		const N = 20000
+
+		err := ring.Watch(func(tx *redis.Tx) error {
+			cmds, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+				for i := 0; i < N; i++ {
+					pipe.Incr("key")
+				}
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(len(cmds)).To(Equal(N))
+			for _, cmd := range cmds {
+				Expect(cmd.Err()).NotTo(HaveOccurred())
+			}
+			return err
+		}, "key")
+		Expect(err).NotTo(HaveOccurred())
+
+		num, err := ring.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(num).To(Equal(int64(N)))
+	})
+
+	It("should Watch/Unwatch", func() {
+		// C concurrent goroutines each perform N transactional increments.
+		const C, N = 10, 100
+
+		err := ring.Set("key", "0", 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		perform(C, func(id int) {
+			for i := 0; i < N; i++ {
+				err := ring.Watch(func(tx *redis.Tx) error {
+					val, err := tx.Get("key").Result()
+					Expect(err).NotTo(HaveOccurred())
+					Expect(val).NotTo(Equal(redis.Nil))
+
+					num, err := strconv.ParseInt(val, 10, 64)
+					Expect(err).NotTo(HaveOccurred())
+
+					cmds, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+						pipe.Set("key", strconv.FormatInt(num+1, 10), 0)
+						return nil
+					})
+					Expect(cmds).To(HaveLen(1))
+					return err
+				}, "key")
+				if err == redis.TxFailedErr {
+					i--
+					continue
+				}
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+
+		val, err := ring.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal(int64(C * N)))
+	})
+
+	It("should close Tx without closing the client", func() {
+		err := ring.Watch(func(tx *redis.Tx) error {
+			_, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+				pipe.Ping()
+				return nil
+			})
+			return err
+		}, "key")
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(ring.Ping().Err()).NotTo(HaveOccurred())
+	})
+
+	It("respects max size on multi", func() {
+		perform(1000, func(id int) {
+			var ping *redis.StatusCmd
+
+			err := ring.Watch(func(tx *redis.Tx) error {
+				cmds, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+					ping = pipe.Ping()
+					return nil
+				})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(cmds).To(HaveLen(1))
+				return err
+			}, "key")
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(ping.Err()).NotTo(HaveOccurred())
+			Expect(ping.Val()).To(Equal("PONG"))
+		})
+
+		_ = ring.ForEachShard(func(cl *redis.Client) error {
+			defer GinkgoRecover()
+
+			pool := cl.Pool()
+			Expect(pool.Len()).To(BeNumerically("<=", 10))
+			Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+			Expect(pool.Len()).To(Equal(pool.IdleLen()))
+
+			return nil
+		})
+	})
+})
+
+var _ = Describe("Ring Tx timeout", func() {
+	const heartbeat = 100 * time.Millisecond
+
+	var ring *redis.Ring
+
+	AfterEach(func() {
+		_ = ring.Close()
+	})
+
+	testTimeout := func() {
+		It("Tx timeouts", func() {
+			err := ring.Watch(func(tx *redis.Tx) error {
+				return tx.Ping().Err()
+			}, "foo")
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+
+		It("Tx Pipeline timeouts", func() {
+			err := ring.Watch(func(tx *redis.Tx) error {
+				_, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+					pipe.Ping()
+					return nil
+				})
+				return err
+			}, "foo")
+			Expect(err).To(HaveOccurred())
+			Expect(err.(net.Error).Timeout()).To(BeTrue())
+		})
+	}
+
+	const pause = 5 * time.Second
+
+	Context("read/write timeout", func() {
+		BeforeEach(func() {
+			opt := redisRingOptions()
+			opt.ReadTimeout = 250 * time.Millisecond
+			opt.WriteTimeout = 250 * time.Millisecond
+			opt.HeartbeatFrequency = heartbeat
+			ring = redis.NewRing(opt)
+
+			err := ring.ForEachShard(func(client *redis.Client) error {
+				return client.ClientPause(pause).Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			_ = ring.ForEachShard(func(client *redis.Client) error {
+				defer GinkgoRecover()
+				Eventually(func() error {
+					return client.Ping().Err()
+				}, 2*pause).ShouldNot(HaveOccurred())
+				return nil
+			})
+		})
+
+		testTimeout()
+	})
+})

+ 62 - 0
script.go

@@ -0,0 +1,62 @@
+package redis
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"io"
+	"strings"
+)
+
+type scripter interface {
+	Eval(script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(hashes ...string) *BoolSliceCmd
+	ScriptLoad(script string) *StringCmd
+}
+
+var _ scripter = (*Client)(nil)
+var _ scripter = (*Ring)(nil)
+var _ scripter = (*ClusterClient)(nil)
+
+type Script struct {
+	src, hash string
+}
+
+func NewScript(src string) *Script {
+	h := sha1.New()
+	_, _ = io.WriteString(h, src)
+	return &Script{
+		src:  src,
+		hash: hex.EncodeToString(h.Sum(nil)),
+	}
+}
+
+func (s *Script) Hash() string {
+	return s.hash
+}
+
+func (s *Script) Load(c scripter) *StringCmd {
+	return c.ScriptLoad(s.src)
+}
+
+func (s *Script) Exists(c scripter) *BoolSliceCmd {
+	return c.ScriptExists(s.hash)
+}
+
+func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd {
+	return c.Eval(s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd {
+	return c.EvalSha(s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd {
+	r := s.EvalSha(c, keys, args...)
+	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+		return s.Eval(c, keys, args...)
+	}
+	return r
+}
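
A short sketch of the Script helper in use, assuming a local Redis instance; Run sends EVALSHA first and transparently falls back to EVAL the first time the server reports NOSCRIPT, after which the script is cached server-side:

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

// incrBy increments KEYS[1] by ARGV[1] and returns the new value.
var incrBy = redis.NewScript(`
	return redis.call("INCRBY", KEYS[1], ARGV[1])
`)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Run tries EVALSHA and retries with EVAL on a NOSCRIPT error, so the
	// script never has to be loaded explicitly with ScriptLoad.
	n, err := incrBy.Run(client, []string{"counter"}, 10).Int64()
	if err != nil {
		panic(err)
	}
	fmt.Println("counter =", n)
}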

+ 503 - 0
sentinel.go

@@ -0,0 +1,503 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/internal"
+	"github.com/go-redis/redis/internal/pool"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+	// The master name.
+	MasterName string
+	// A seed list of host:port addresses of sentinel nodes.
+	SentinelAddrs    []string
+	SentinelPassword string
+
+	// The following options are copied from the Options struct.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(*Conn) error
+
+	Password string
+	DB       int
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) options() *Options {
+	return &Options{
+		Addr:      "FailoverClient",
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		DB:       opt.DB,
+		Password: opt.Password,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+	opt := failoverOpt.options()
+	opt.init()
+
+	failover := &sentinelFailover{
+		masterName:    failoverOpt.MasterName,
+		sentinelAddrs: failoverOpt.SentinelAddrs,
+		password:      failoverOpt.SentinelPassword,
+
+		opt: opt,
+	}
+
+	c := Client{
+		baseClient: newBaseClient(opt, failover.Pool()),
+		ctx:        context.Background(),
+	}
+	c.cmdable = c.Process
+	c.onClose = failover.Close
+
+	return &c
+}
+
+//------------------------------------------------------------------------------
+
+type SentinelClient struct {
+	*baseClient
+	ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+	opt.init()
+	c := &SentinelClient{
+		baseClient: &baseClient{
+			opt:      opt,
+			connPool: newConnPool(opt),
+		},
+		ctx: context.Background(),
+	}
+	return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+	return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.ctx = ctx
+	return &clone
+}
+
+func (c *SentinelClient) Process(cmd Cmder) error {
+	return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *SentinelClient) ProcessContext(ctx context.Context, cmd Cmder) error {
+	return c.baseClient.process(ctx, cmd)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+	pubsub := &PubSub{
+		opt: c.opt,
+
+		newConn: func(channels []string) (*pool.Conn, error) {
+			return c.newConn(context.TODO())
+		},
+		closeConn: c.connPool.CloseConn,
+	}
+	pubsub.init()
+	return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping() *StringCmd {
+	cmd := NewStringCmd("ping")
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(channels...)
+	}
+	return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("sentinel", "get-master-addr-by-name", name)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+func (c *SentinelClient) Sentinels(name string) *SliceCmd {
+	cmd := NewSliceCmd("sentinel", "sentinels", name)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Failover forces a failover as if the master was not reachable, and without
+// asking for agreement from the other Sentinels.
+func (c *SentinelClient) Failover(name string) *StatusCmd {
+	cmd := NewStatusCmd("sentinel", "failover", name)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Reset resets all the masters with a matching name. The pattern argument is
+// a glob-style pattern. The reset process clears any previous state in a
+// master (including a failover in progress) and removes every slave and
+// sentinel already discovered and associated with the master.
+func (c *SentinelClient) Reset(pattern string) *IntCmd {
+	cmd := NewIntCmd("sentinel", "reset", pattern)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig() *StatusCmd {
+	cmd := NewStatusCmd("sentinel", "flushconfig")
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(name string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd("sentinel", "master", name)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters() *SliceCmd {
+	cmd := NewSliceCmd("sentinel", "masters")
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(name string) *SliceCmd {
+	cmd := NewSliceCmd("sentinel", "slaves", name)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to fail over a master, and the majority needed to authorize
+// the failover. This command should be used in monitoring systems to check
+// if a Sentinel deployment is OK.
+func (c *SentinelClient) CkQuorum(name string) *StringCmd {
+	cmd := NewStringCmd("sentinel", "ckquorum", name)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(name, ip, port, quorum string) *StringCmd {
+	cmd := NewStringCmd("sentinel", "monitor", name, ip, port, quorum)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Set is used to change configuration parameters of a specific master.
+func (c *SentinelClient) Set(name, option, value string) *StringCmd {
+	cmd := NewStringCmd("sentinel", "set", name, option, value)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Remove removes the specified master: the master will no longer be
+// monitored and will be completely removed from the internal state of
+// the Sentinel.
+func (c *SentinelClient) Remove(name string) *StringCmd {
+	cmd := NewStringCmd("sentinel", "remove", name)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+type sentinelFailover struct {
+	sentinelAddrs []string
+
+	opt      *Options
+	password string
+
+	pool     *pool.ConnPool
+	poolOnce sync.Once
+
+	mu          sync.RWMutex
+	masterName  string
+	_masterAddr string
+	sentinel    *SentinelClient
+	pubsub      *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.sentinel != nil {
+		return c.closeSentinel()
+	}
+	return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+	firstErr := c.pubsub.Close()
+	c.pubsub = nil
+
+	err := c.sentinel.Close()
+	if err != nil && firstErr == nil {
+		firstErr = err
+	}
+	c.sentinel = nil
+
+	return firstErr
+}
+
+func (c *sentinelFailover) Pool() *pool.ConnPool {
+	c.poolOnce.Do(func() {
+		opt := *c.opt
+		opt.Dialer = c.dial
+		c.pool = newConnPool(&opt)
+	})
+	return c.pool
+}
+
+func (c *sentinelFailover) dial(ctx context.Context, network, _ string) (net.Conn, error) {
+	addr, err := c.MasterAddr()
+	if err != nil {
+		return nil, err
+	}
+	if c.opt.Dialer != nil {
+		return c.opt.Dialer(ctx, network, addr)
+	}
+	return net.DialTimeout("tcp", addr, c.opt.DialTimeout)
+}
+
+func (c *sentinelFailover) MasterAddr() (string, error) {
+	addr, err := c.masterAddr()
+	if err != nil {
+		return "", err
+	}
+	c.switchMaster(addr)
+	return addr, nil
+}
+
+func (c *sentinelFailover) masterAddr() (string, error) {
+	c.mu.RLock()
+	sentinel := c.sentinel
+	c.mu.RUnlock()
+
+	if sentinel != nil {
+		addr := c.getMasterAddr(sentinel)
+		if addr != "" {
+			return addr, nil
+		}
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.sentinel != nil {
+		addr := c.getMasterAddr(c.sentinel)
+		if addr != "" {
+			return addr, nil
+		}
+		_ = c.closeSentinel()
+	}
+
+	for i, sentinelAddr := range c.sentinelAddrs {
+		sentinel := NewSentinelClient(&Options{
+			Addr:   sentinelAddr,
+			Dialer: c.opt.Dialer,
+
+			Password: c.password,
+
+			MaxRetries: c.opt.MaxRetries,
+
+			DialTimeout:  c.opt.DialTimeout,
+			ReadTimeout:  c.opt.ReadTimeout,
+			WriteTimeout: c.opt.WriteTimeout,
+
+			PoolSize:           c.opt.PoolSize,
+			PoolTimeout:        c.opt.PoolTimeout,
+			IdleTimeout:        c.opt.IdleTimeout,
+			IdleCheckFrequency: c.opt.IdleCheckFrequency,
+
+			TLSConfig: c.opt.TLSConfig,
+		})
+
+		masterAddr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
+		if err != nil {
+			internal.Logger.Printf("sentinel: GetMasterAddrByName master=%q failed: %s",
+				c.masterName, err)
+			_ = sentinel.Close()
+			continue
+		}
+
+		// Push working sentinel to the top.
+		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+		c.setSentinel(sentinel)
+
+		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+		return addr, nil
+	}
+
+	return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(sentinel *SentinelClient) string {
+	addr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
+	if err != nil {
+		internal.Logger.Printf("sentinel: GetMasterAddrByName name=%q failed: %s",
+			c.masterName, err)
+		return ""
+	}
+	return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) switchMaster(addr string) {
+	c.mu.RLock()
+	masterAddr := c._masterAddr
+	c.mu.RUnlock()
+	if masterAddr == addr {
+		return
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c._masterAddr == addr {
+		return
+	}
+
+	internal.Logger.Printf("sentinel: new master=%q addr=%q",
+		c.masterName, addr)
+	_ = c.Pool().Filter(func(cn *pool.Conn) bool {
+		return cn.RemoteAddr().String() != addr
+	})
+	c._masterAddr = addr
+}
+
+func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) {
+	if c.sentinel != nil {
+		panic("not reached")
+	}
+	c.sentinel = sentinel
+	c.discoverSentinels()
+
+	c.pubsub = sentinel.Subscribe("+switch-master")
+	go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels() {
+	sentinels, err := c.sentinel.Sentinels(c.masterName).Result()
+	if err != nil {
+		internal.Logger.Printf("sentinel: Sentinels master=%q failed: %s", c.masterName, err)
+		return
+	}
+	for _, sentinel := range sentinels {
+		vals := sentinel.([]interface{})
+		for i := 0; i < len(vals); i += 2 {
+			key := vals[i].(string)
+			if key == "name" {
+				sentinelAddr := vals[i+1].(string)
+				if !contains(c.sentinelAddrs, sentinelAddr) {
+					internal.Logger.Printf("sentinel: discovered new sentinel=%q for master=%q",
+						sentinelAddr, c.masterName)
+					c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+				}
+			}
+		}
+	}
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+	ch := pubsub.Channel()
+	for {
+		msg, ok := <-ch
+		if !ok {
+			break
+		}
+
+		if msg.Channel == "+switch-master" {
+			parts := strings.Split(msg.Payload, " ")
+			if parts[0] != c.masterName {
+				internal.Logger.Printf("sentinel: ignore addr for master=%q", parts[0])
+				continue
+			}
+			addr := net.JoinHostPort(parts[3], parts[4])
+			c.switchMaster(addr)
+		}
+	}
+}
+
+func contains(slice []string, str string) bool {
+	for _, s := range slice {
+		if s == str {
+			return true
+		}
+	}
+	return false
+}
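
A minimal sketch of a Sentinel-backed setup, assuming a master named "mymaster" monitored by sentinels at the addresses shown (all names and ports are placeholders). NewFailoverClient resolves the current master through the sentinels and switches connections when a +switch-master event is published:

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	// Commands issued on the failover client always go to the current master.
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:26379", "localhost:26380"},
	})
	defer client.Close()

	if err := client.Set("key", "value", 0).Err(); err != nil {
		panic(err)
	}

	// A SentinelClient can also be used directly, e.g. for monitoring.
	sentinel := redis.NewSentinelClient(&redis.Options{Addr: "localhost:26379"})
	defer sentinel.Close()

	addr, err := sentinel.GetMasterAddrByName("mymaster").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("current master:", addr[0]+":"+addr[1])
}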

+ 88 - 0
sentinel_test.go

@@ -0,0 +1,88 @@
+package redis_test
+
+import (
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Sentinel", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		client = redis.NewFailoverClient(&redis.FailoverOptions{
+			MasterName:    sentinelName,
+			SentinelAddrs: []string{":" + sentinelPort},
+		})
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("should facilitate failover", func() {
+		// Set value on master.
+		err := client.Set("foo", "master", 0).Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		// Verify.
+		val, err := sentinelMaster.Get("foo").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(val).To(Equal("master"))
+
+		// Create subscription.
+		ch := client.Subscribe("foo").Channel()
+
+		// Wait until replicated.
+		Eventually(func() string {
+			return sentinelSlave1.Get("foo").Val()
+		}, "1s", "100ms").Should(Equal("master"))
+		Eventually(func() string {
+			return sentinelSlave2.Get("foo").Val()
+		}, "1s", "100ms").Should(Equal("master"))
+
+		// Wait until slaves are picked up by sentinel.
+		Eventually(func() string {
+			return sentinel.Info().Val()
+		}, "10s", "100ms").Should(ContainSubstring("slaves=2"))
+
+		// Kill master.
+		sentinelMaster.Shutdown()
+		Eventually(func() error {
+			return sentinelMaster.Ping().Err()
+		}, "5s", "100ms").Should(HaveOccurred())
+
+		// Wait for Redis sentinel to elect new master.
+		Eventually(func() string {
+			return sentinelSlave1.Info().Val() + sentinelSlave2.Info().Val()
+		}, "30s", "1s").Should(ContainSubstring("role:master"))
+
+		// Check that client picked up new master.
+		Eventually(func() error {
+			return client.Get("foo").Err()
+		}, "5s", "100ms").ShouldNot(HaveOccurred())
+
+		// Publish message to check if subscription is renewed.
+		err = client.Publish("foo", "hello").Err()
+		Expect(err).NotTo(HaveOccurred())
+
+		var msg *redis.Message
+		Eventually(ch, "5s").Should(Receive(&msg))
+		Expect(msg.Channel).To(Equal("foo"))
+		Expect(msg.Payload).To(Equal("hello"))
+	})
+
+	It("supports DB selection", func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+
+		client = redis.NewFailoverClient(&redis.FailoverOptions{
+			MasterName:    sentinelName,
+			SentinelAddrs: []string{":" + sentinelPort},
+			DB:            1,
+		})
+		err := client.Ping().Err()
+		Expect(err).NotTo(HaveOccurred())
+	})
+})

+ 10 - 0
testdata/redis.conf

@@ -0,0 +1,10 @@
+# Minimal redis.conf
+
+port 6379
+daemonize no
+dir .
+save ""
+appendonly yes
+cluster-config-file nodes.conf
+cluster-node-timeout 30000
+maxclients 1001

+ 156 - 0
tx.go

@@ -0,0 +1,156 @@
+package redis
+
+import (
+	"context"
+
+	"github.com/go-redis/redis/internal/pool"
+	"github.com/go-redis/redis/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction fails.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+// If you don't need WATCH, it is better to use Pipeline.
+type Tx struct {
+	baseClient
+	cmdable
+	statefulCmdable
+	hooks
+	ctx context.Context
+}
+
+func (c *Client) newTx(ctx context.Context) *Tx {
+	tx := Tx{
+		baseClient: baseClient{
+			opt:      c.opt,
+			connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true),
+		},
+		hooks: c.hooks.Clone(),
+		ctx:   ctx,
+	}
+	tx.init()
+	return &tx
+}
+
+func (c *Tx) init() {
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+}
+
+func (c *Tx) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Tx) WithContext(ctx context.Context) *Tx {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.init()
+	clone.hooks.Lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+func (c *Tx) Process(cmd Cmder) error {
+	return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Tx) ProcessContext(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+// Watch prepares a transaction and, if any keys are given, marks them
+// to be watched for conditional execution.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
+	return c.WatchContext(c.ctx, fn, keys...)
+}
+
+func (c *Client) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	tx := c.newTx(ctx)
+	if len(keys) > 0 {
+		if err := tx.Watch(keys...).Err(); err != nil {
+			_ = tx.Close()
+			return err
+		}
+	}
+
+	err := fn(tx)
+	_ = tx.Close()
+	return err
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close() error {
+	_ = c.Unwatch().Err()
+	return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "watch"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(args...)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unwatch"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(args...)
+	_ = c.Process(cmd)
+	return cmd
+}
+
+func (c *Tx) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx: c.ctx,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+		},
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(fn)
+}
+
+// TxPipelined executes the commands queued in fn in a transaction.
+//
+// When using WATCH, EXEC executes the queued commands only if the watched
+// keys were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command or nil.
+func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(fn)
+}
+
+// TxPipeline creates a new pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx: c.ctx,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			return c.hooks.processPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+		},
+	}
+	pipe.init()
+	return &pipe
+}
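
Watch and TxPipelined together implement optimistic locking: read outside the queued pipeline, queue the writes, and retry when EXEC returns TxFailedErr because a watched key was modified. A sketch assuming a local server; the key name and retry limit are illustrative:

package main

import (
	"strconv"

	"github.com/go-redis/redis/v7"
)

// increment performs a WATCH-based check-and-set increment of key and
// retries while the watched key keeps being modified concurrently.
func increment(client *redis.Client, key string) error {
	txf := func(tx *redis.Tx) error {
		n, err := tx.Get(key).Int64()
		if err != nil && err != redis.Nil {
			return err
		}
		// The queued SET is executed by EXEC only if key is unchanged.
		_, err = tx.TxPipelined(func(pipe redis.Pipeliner) error {
			pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
			return nil
		})
		return err
	}

	for retries := 0; retries < 100; retries++ {
		err := client.Watch(txf, key)
		if err != redis.TxFailedErr {
			return err // nil on success, or a non-retryable error
		}
		// Optimistic lock lost; try again.
	}
	return redis.TxFailedErr
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	if err := increment(client, "counter"); err != nil {
		panic(err)
	}
}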

+ 151 - 0
tx_test.go

@@ -0,0 +1,151 @@
+package redis_test
+
+import (
+	"context"
+	"strconv"
+	"sync"
+
+	"github.com/go-redis/redis/v7"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Tx", func() {
+	var client *redis.Client
+
+	BeforeEach(func() {
+		client = redis.NewClient(redisOptions())
+		Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("should Watch", func() {
+		var incr func(string) error
+
+		// Transactionally increments key using GET and SET commands.
+		incr = func(key string) error {
+			err := client.Watch(func(tx *redis.Tx) error {
+				n, err := tx.Get(key).Int64()
+				if err != nil && err != redis.Nil {
+					return err
+				}
+
+				_, err = tx.TxPipelined(func(pipe redis.Pipeliner) error {
+					pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
+					return nil
+				})
+				return err
+			}, key)
+			if err == redis.TxFailedErr {
+				return incr(key)
+			}
+			return err
+		}
+
+		var wg sync.WaitGroup
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+			go func() {
+				defer GinkgoRecover()
+				defer wg.Done()
+
+				err := incr("key")
+				Expect(err).NotTo(HaveOccurred())
+			}()
+		}
+		wg.Wait()
+
+		n, err := client.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(n).To(Equal(int64(100)))
+	})
+
+	It("should discard", func() {
+		err := client.Watch(func(tx *redis.Tx) error {
+			cmds, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+				pipe.Set("key1", "hello1", 0)
+				pipe.Discard()
+				pipe.Set("key2", "hello2", 0)
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cmds).To(HaveLen(1))
+			return err
+		}, "key1", "key2")
+		Expect(err).NotTo(HaveOccurred())
+
+		get := client.Get("key1")
+		Expect(get.Err()).To(Equal(redis.Nil))
+		Expect(get.Val()).To(Equal(""))
+
+		get = client.Get("key2")
+		Expect(get.Err()).NotTo(HaveOccurred())
+		Expect(get.Val()).To(Equal("hello2"))
+	})
+
+	It("returns no error when there are no commands", func() {
+		err := client.Watch(func(tx *redis.Tx) error {
+			_, err := tx.TxPipelined(func(redis.Pipeliner) error { return nil })
+			return err
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		v, err := client.Ping().Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(v).To(Equal("PONG"))
+	})
+
+	It("should exec bulks", func() {
+		const N = 20000
+
+		err := client.Watch(func(tx *redis.Tx) error {
+			cmds, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+				for i := 0; i < N; i++ {
+					pipe.Incr("key")
+				}
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(len(cmds)).To(Equal(N))
+			for _, cmd := range cmds {
+				Expect(cmd.Err()).NotTo(HaveOccurred())
+			}
+			return err
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		num, err := client.Get("key").Int64()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(num).To(Equal(int64(N)))
+	})
+
+	It("should recover from bad connection", func() {
+		// Put bad connection in the pool.
+		cn, err := client.Pool().Get(context.Background())
+		Expect(err).NotTo(HaveOccurred())
+
+		cn.SetNetConn(&badConn{})
+		client.Pool().Put(cn)
+
+		do := func() error {
+			err := client.Watch(func(tx *redis.Tx) error {
+				_, err := tx.TxPipelined(func(pipe redis.Pipeliner) error {
+					pipe.Ping()
+					return nil
+				})
+				return err
+			})
+			return err
+		}
+
+		err = do()
+		Expect(err).To(MatchError("bad connection"))
+
+		err = do()
+		Expect(err).NotTo(HaveOccurred())
+	})
+})

+ 194 - 0
universal.go

@@ -0,0 +1,194 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+// UniversalOptions contains the options that UniversalClient needs to
+// establish connections.
+type UniversalOptions struct {
+	// Either a single address or a seed list of host:port addresses
+	// of cluster/sentinel nodes.
+	Addrs []string
+
+	// Database to be selected after connecting to the server.
+	// Only single-node and failover clients.
+	DB int
+
+	// Common options.
+
+	Dialer             func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect          func(*Conn) error
+	Password           string
+	MaxRetries         int
+	MinRetryBackoff    time.Duration
+	MaxRetryBackoff    time.Duration
+	DialTimeout        time.Duration
+	ReadTimeout        time.Duration
+	WriteTimeout       time.Duration
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+	TLSConfig          *tls.Config
+
+	// Only cluster clients.
+
+	MaxRedirects   int
+	ReadOnly       bool
+	RouteByLatency bool
+	RouteRandomly  bool
+
+	// The sentinel master name.
+	// Only failover clients.
+	MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+	if len(o.Addrs) == 0 {
+		o.Addrs = []string{"127.0.0.1:6379"}
+	}
+
+	return &ClusterOptions{
+		Addrs:     o.Addrs,
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		Password: o.Password,
+
+		MaxRedirects:   o.MaxRedirects,
+		ReadOnly:       o.ReadOnly,
+		RouteByLatency: o.RouteByLatency,
+		RouteRandomly:  o.RouteRandomly,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:        o.DialTimeout,
+		ReadTimeout:        o.ReadTimeout,
+		WriteTimeout:       o.WriteTimeout,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+	if len(o.Addrs) == 0 {
+		o.Addrs = []string{"127.0.0.1:26379"}
+	}
+
+	return &FailoverOptions{
+		SentinelAddrs: o.Addrs,
+		MasterName:    o.MasterName,
+
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		DB:       o.DB,
+		Password: o.Password,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:  o.DialTimeout,
+		ReadTimeout:  o.ReadTimeout,
+		WriteTimeout: o.WriteTimeout,
+
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+	addr := "127.0.0.1:6379"
+	if len(o.Addrs) > 0 {
+		addr = o.Addrs[0]
+	}
+
+	return &Options{
+		Addr:      addr,
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		DB:       o.DB,
+		Password: o.Password,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:  o.DialTimeout,
+		ReadTimeout:  o.ReadTimeout,
+		WriteTimeout: o.WriteTimeout,
+
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which, based on the provided options,
+// can connect to clusters, sentinel-backed failover instances, or simple
+// single-instance servers. This can be useful for testing cluster-specific
+// applications locally.
+type UniversalClient interface {
+	Cmdable
+	Context() context.Context
+	AddHook(Hook)
+	Watch(fn func(*Tx) error, keys ...string) error
+	Do(args ...interface{}) *Cmd
+	DoContext(ctx context.Context, args ...interface{}) *Cmd
+	Process(cmd Cmder) error
+	ProcessContext(ctx context.Context, cmd Cmder) error
+	Subscribe(channels ...string) *PubSub
+	PSubscribe(channels ...string) *PubSub
+	Close() error
+}
+
+var _ UniversalClient = (*Client)(nil)
+var _ UniversalClient = (*ClusterClient)(nil)
+var _ UniversalClient = (*Ring)(nil)
+
+// NewUniversalClient returns a new multi client. The type of the returned
+// client depends on the following conditions:
+//
+// 1. If MasterName is set, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Redis Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+	if opts.MasterName != "" {
+		return NewFailoverClient(opts.Failover())
+	} else if len(opts.Addrs) > 1 {
+		return NewClusterClient(opts.Cluster())
+	}
+	return NewClient(opts.Simple())
+}
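
A sketch of how the three option shapes map onto concrete clients (addresses and the master name are placeholders):

package main

import "github.com/go-redis/redis/v7"

func main() {
	// MasterName set: sentinel-backed failover *redis.Client.
	failover := redis.NewUniversalClient(&redis.UniversalOptions{
		MasterName: "mymaster",
		Addrs:      []string{"localhost:26379"},
	})
	defer failover.Close()

	// Two or more addresses and no MasterName: *redis.ClusterClient.
	cluster := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:7000", "localhost:7001", "localhost:7002"},
	})
	defer cluster.Close()

	// A single address: plain single-node *redis.Client.
	simple := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})
	defer simple.Close()

	// All three satisfy the same UniversalClient interface.
	for _, c := range []redis.UniversalClient{failover, cluster, simple} {
		_ = c.Ping().Err() // errors ignored here; the servers may not be running
	}
}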

+ 41 - 0
universal_test.go

@@ -0,0 +1,41 @@
+package redis_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"github.com/go-redis/redis/v7"
+)
+
+var _ = Describe("UniversalClient", func() {
+	var client redis.UniversalClient
+
+	AfterEach(func() {
+		if client != nil {
+			Expect(client.Close()).To(Succeed())
+		}
+	})
+
+	It("should connect to failover servers", func() {
+		client = redis.NewUniversalClient(&redis.UniversalOptions{
+			MasterName: sentinelName,
+			Addrs:      []string{":" + sentinelPort},
+		})
+		Expect(client.Ping().Err()).NotTo(HaveOccurred())
+	})
+
+	It("should connect to simple servers", func() {
+		client = redis.NewUniversalClient(&redis.UniversalOptions{
+			Addrs: []string{redisAddr},
+		})
+		Expect(client.Ping().Err()).NotTo(HaveOccurred())
+	})
+
+	It("should connect to clusters", func() {
+		client = redis.NewUniversalClient(&redis.UniversalOptions{
+			Addrs: cluster.addrs(),
+		})
+		Expect(client.Ping().Err()).NotTo(HaveOccurred())
+	})
+
+})