// etcd_test.go
  1. package etcd
  2. import (
  3. "fmt"
  4. "math/rand"
  5. "net/http"
  6. "net/http/httptest"
  7. "net/url"
  8. "testing"
  9. "time"
  10. "github.com/coreos/etcd/config"
  11. "github.com/coreos/etcd/store"
  12. )
  13. func TestMultipleNodes(t *testing.T) {
  14. tests := []int{1, 3, 5, 9, 11}
  15. for _, tt := range tests {
  16. es, hs := buildCluster(tt, false)
  17. waitCluster(t, es)
  18. for i := range es {
  19. es[len(es)-i-1].Stop()
  20. }
  21. for i := range hs {
  22. hs[len(hs)-i-1].Close()
  23. }
  24. }
  25. afterTest(t)
  26. }
  27. func TestMultipleTLSNodes(t *testing.T) {
  28. tests := []int{1, 3, 5}
  29. for _, tt := range tests {
  30. es, hs := buildCluster(tt, true)
  31. waitCluster(t, es)
  32. for i := range es {
  33. es[len(es)-i-1].Stop()
  34. }
  35. for i := range hs {
  36. hs[len(hs)-i-1].Close()
  37. }
  38. }
  39. afterTest(t)
  40. }
  41. func TestV2Redirect(t *testing.T) {
  42. es, hs := buildCluster(3, false)
  43. waitCluster(t, es)
  44. u := hs[1].URL
  45. ru := fmt.Sprintf("%s%s", hs[0].URL, "/v2/keys/foo")
  46. tc := NewTestClient()
  47. v := url.Values{}
  48. v.Set("value", "XXX")
  49. resp, _ := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo"), v)
  50. if resp.StatusCode != http.StatusTemporaryRedirect {
  51. t.Errorf("status = %d, want %d", resp.StatusCode, http.StatusTemporaryRedirect)
  52. }
  53. location, err := resp.Location()
  54. if err != nil {
  55. t.Errorf("want err = %, want nil", err)
  56. }
  57. if location.String() != ru {
  58. t.Errorf("location = %v, want %v", location.String(), ru)
  59. }
  60. resp.Body.Close()
  61. for i := range es {
  62. es[len(es)-i-1].Stop()
  63. }
  64. for i := range hs {
  65. hs[len(hs)-i-1].Close()
  66. }
  67. afterTest(t)
  68. }
  69. func TestAdd(t *testing.T) {
  70. tests := []int{3, 4, 5, 6}
  71. for _, tt := range tests {
  72. es := make([]*Server, tt)
  73. hs := make([]*httptest.Server, tt)
  74. for i := 0; i < tt; i++ {
  75. c := config.New()
  76. if i > 0 {
  77. c.Peers = []string{hs[0].URL}
  78. }
  79. es[i], hs[i] = initTestServer(c, int64(i), false)
  80. }
  81. go es[0].Run()
  82. waitMode(participantMode, es[0])
  83. for i := 1; i < tt; i++ {
  84. id := int64(i)
  85. for {
  86. lead := es[0].p.node.Leader()
  87. if lead == -1 {
  88. time.Sleep(defaultElection * es[0].tickDuration)
  89. continue
  90. }
  91. err := es[lead].p.add(id, es[id].raftPubAddr, es[id].pubAddr)
  92. if err == nil {
  93. break
  94. }
  95. switch err {
  96. case tmpErr:
  97. time.Sleep(defaultElection * es[0].tickDuration)
  98. case raftStopErr, stopErr:
  99. t.Fatalf("#%d on %d: unexpected stop", i, lead)
  100. default:
  101. t.Fatal(err)
  102. }
  103. }
  104. go es[i].Run()
  105. waitMode(participantMode, es[i])
  106. for j := 0; j <= i; j++ {
  107. p := fmt.Sprintf("%s/%d", v2machineKVPrefix, id)
  108. w, err := es[j].p.Watch(p, false, false, 1)
  109. if err != nil {
  110. t.Errorf("#%d on %d: %v", i, j, err)
  111. break
  112. }
  113. <-w.EventChan
  114. }
  115. }
  116. for i := range hs {
  117. es[len(hs)-i-1].Stop()
  118. }
  119. for i := range hs {
  120. hs[len(hs)-i-1].Close()
  121. }
  122. }
  123. afterTest(t)
  124. }
  125. func TestRemove(t *testing.T) {
  126. tests := []int{3, 4, 5, 6}
  127. for k, tt := range tests {
  128. es, hs := buildCluster(tt, false)
  129. waitCluster(t, es)
  130. lead, _ := waitLeader(es)
  131. config := config.NewClusterConfig()
  132. config.ActiveSize = 0
  133. if err := es[lead].p.setClusterConfig(config); err != nil {
  134. t.Fatalf("#%d: setClusterConfig err = %v", k, err)
  135. }
  136. // we don't remove the machine from 2-node cluster because it is
  137. // not 100 percent safe in our raft.
  138. // TODO(yichengq): improve it later.
  139. for i := 0; i < tt-2; i++ {
  140. id := int64(i)
  141. send := id
  142. for {
  143. send++
  144. if send > int64(tt-1) {
  145. send = id
  146. }
  147. lead := es[send].p.node.Leader()
  148. if lead == -1 {
  149. time.Sleep(defaultElection * 5 * time.Millisecond)
  150. continue
  151. }
  152. err := es[lead].p.remove(id)
  153. if err == nil {
  154. break
  155. }
  156. switch err {
  157. case tmpErr:
  158. time.Sleep(defaultElection * 5 * time.Millisecond)
  159. case raftStopErr, stopErr:
  160. if lead == id {
  161. break
  162. }
  163. default:
  164. t.Fatal(err)
  165. }
  166. }
  167. waitMode(standbyMode, es[i])
  168. }
  169. for i := range es {
  170. es[len(hs)-i-1].Stop()
  171. }
  172. for i := range hs {
  173. hs[len(hs)-i-1].Close()
  174. }
  175. }
  176. afterTest(t)
  177. // ensure that no goroutines are running
  178. TestGoroutinesRunning(t)
  179. }
  180. func TestBecomeStandby(t *testing.T) {
  181. size := 5
  182. round := 1
  183. for j := 0; j < round; j++ {
  184. es, hs := buildCluster(size, false)
  185. waitCluster(t, es)
  186. lead, _ := waitActiveLeader(es)
  187. i := rand.Intn(size)
  188. // cluster only demotes follower
  189. if int64(i) == lead {
  190. i = (i + 1) % size
  191. }
  192. id := int64(i)
  193. config := config.NewClusterConfig()
  194. config.SyncInterval = 1000
  195. config.ActiveSize = size - 1
  196. if err := es[lead].p.setClusterConfig(config); err != nil {
  197. t.Fatalf("#%d: setClusterConfig err = %v", i, err)
  198. }
  199. if err := es[lead].p.remove(id); err != nil {
  200. t.Fatalf("#%d: remove err = %v", i, err)
  201. }
  202. waitMode(standbyMode, es[i])
  203. for k := 0; k < 4; k++ {
  204. if es[i].s.leader != noneId {
  205. break
  206. }
  207. time.Sleep(20 * time.Millisecond)
  208. }
  209. if g := es[i].s.leader; g != lead {
  210. t.Errorf("#%d: lead = %d, want %d", i, g, lead)
  211. }
  212. for i := range hs {
  213. es[len(hs)-i-1].Stop()
  214. }
  215. for i := range hs {
  216. hs[len(hs)-i-1].Close()
  217. }
  218. }
  219. afterTest(t)
  220. }
// TODO(yichengq): cannot handle previous msgDenial correctly now
// TestModeSwitch repeatedly demotes node i to standby (by shrinking the
// active set and removing it) and then promotes it back (by re-growing
// the active set), checking that it tracks the leader while standby and
// serves as a live participant once promoted.
func TestModeSwitch(t *testing.T) {
	t.Skip("not passed")
	size := 5
	round := 3
	for i := 0; i < size; i++ {
		es, hs := buildCluster(size, false)
		waitCluster(t, es)
		config := config.NewClusterConfig()
		config.SyncInterval = 0
		id := int64(i)
		for j := 0; j < round; j++ {
			lead, _ := waitActiveLeader(es)
			// cluster only demotes follower
			if lead == id {
				continue
			}
			// Shrink the active set by one so node i gets demoted.
			config.ActiveSize = size - 1
			if err := es[lead].p.setClusterConfig(config); err != nil {
				t.Fatalf("#%d: setClusterConfig err = %v", i, err)
			}
			if err := es[lead].p.remove(id); err != nil {
				t.Fatalf("#%d: remove err = %v", i, err)
			}
			waitMode(standbyMode, es[i])
			// Poll briefly until the standby reports some leader.
			for k := 0; k < 4; k++ {
				if es[i].s.leader != noneId {
					break
				}
				time.Sleep(20 * time.Millisecond)
			}
			if g := es[i].s.leader; g != lead {
				t.Errorf("#%d: lead = %d, want %d", i, g, lead)
			}
			// Re-grow the active set so node i can rejoin as a participant.
			config.ActiveSize = size
			if err := es[lead].p.setClusterConfig(config); err != nil {
				t.Fatalf("#%d: setClusterConfig err = %v", i, err)
			}
			waitMode(participantMode, es[i])
			if err := checkParticipant(i, es); err != nil {
				t.Errorf("#%d: check alive err = %v", i, err)
			}
		}
		// NOTE(review): teardown indexes es by len(hs); the lengths are
		// equal here, so the effect is the same as len(es).
		for i := range hs {
			es[len(hs)-i-1].Stop()
		}
		for i := range hs {
			hs[len(hs)-i-1].Close()
		}
	}
	afterTest(t)
}
  273. func buildCluster(number int, tls bool) ([]*Server, []*httptest.Server) {
  274. bootstrapper := 0
  275. es := make([]*Server, number)
  276. hs := make([]*httptest.Server, number)
  277. var seed string
  278. for i := range es {
  279. c := config.New()
  280. if seed != "" {
  281. c.Peers = []string{seed}
  282. }
  283. es[i], hs[i] = initTestServer(c, int64(i), tls)
  284. if i == bootstrapper {
  285. seed = hs[i].URL
  286. } else {
  287. // wait for the previous configuration change to be committed
  288. // or this configuration request might be dropped
  289. w, err := es[0].p.Watch(v2machineKVPrefix, true, false, uint64(i))
  290. if err != nil {
  291. panic(err)
  292. }
  293. <-w.EventChan
  294. }
  295. go es[i].Run()
  296. waitMode(participantMode, es[i])
  297. }
  298. return es, hs
  299. }
  300. func initTestServer(c *config.Config, id int64, tls bool) (e *Server, h *httptest.Server) {
  301. e = New(c, id)
  302. e.SetTick(time.Millisecond * 5)
  303. m := http.NewServeMux()
  304. m.Handle("/", e)
  305. m.Handle("/raft", e.RaftHandler())
  306. m.Handle("/raft/", e.RaftHandler())
  307. if tls {
  308. h = httptest.NewTLSServer(m)
  309. } else {
  310. h = httptest.NewServer(m)
  311. }
  312. e.raftPubAddr = h.URL
  313. e.pubAddr = h.URL
  314. return
  315. }
// waitCluster blocks until every server in es has observed the
// registration of all len(es) machines in the v2 machine store,
// failing the test when a registration appears under an unexpected key.
func waitCluster(t *testing.T, es []*Server) {
	n := len(es)
	for i, e := range es {
		var index uint64
		for k := 0; k < n; k++ {
			// Watch events starting at the next store index; each join
			// shows up as an event under v2machineKVPrefix.
			index++
			w, err := e.p.Watch(v2machineKVPrefix, true, false, index)
			if err != nil {
				panic(err)
			}
			v := <-w.EventChan
			// join command may appear several times due to retry
			// when timeout
			if k > 0 {
				pw := fmt.Sprintf("%s/%d", v2machineKVPrefix, k-1)
				if v.Node.Key == pw {
					// Duplicate of the previous machine's join; skip it.
					continue
				}
			}
			ww := fmt.Sprintf("%s/%d", v2machineKVPrefix, k)
			if v.Node.Key != ww {
				t.Errorf("#%d path = %v, want %v", i, v.Node.Key, ww)
			}
		}
	}
}
  342. func waitMode(mode int64, e *Server) {
  343. for {
  344. if e.mode.Get() == mode {
  345. return
  346. }
  347. time.Sleep(10 * time.Millisecond)
  348. }
  349. }
  350. // checkParticipant checks the i-th server works well as participant.
  351. func checkParticipant(i int, es []*Server) error {
  352. lead, _ := waitActiveLeader(es)
  353. key := fmt.Sprintf("/%d", rand.Int31())
  354. ev, err := es[lead].p.Set(key, false, "bar", store.Permanent)
  355. if err != nil {
  356. return err
  357. }
  358. w, err := es[i].p.Watch(key, false, false, ev.Index())
  359. if err != nil {
  360. return err
  361. }
  362. select {
  363. case <-w.EventChan:
  364. case <-time.After(8 * defaultHeartbeat * es[i].tickDuration):
  365. return fmt.Errorf("watch timeout")
  366. }
  367. return nil
  368. }