@@ -614,6 +614,9 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
 		return
 	}
 
+	plog.Infof("applying snapshot at index %d...", ep.snapi)
+	defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
+
 	if apply.snapshot.Metadata.Index <= ep.appliedi {
 		plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
 			apply.snapshot.Metadata.Index, ep.appliedi)
@@ -630,17 +633,25 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
 	}
 
 	newbe := backend.NewDefaultBackend(fn)
+
+	plog.Info("restoring mvcc store...")
+
 	if err := s.kv.Restore(newbe); err != nil {
 		plog.Panicf("restore KV error: %v", err)
 	}
 	s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
 
+	plog.Info("finished restoring mvcc store")
+
 	// Closing old backend might block until all the txns
 	// on the backend are finished.
 	// We do not want to wait on closing the old backend.
 	s.bemu.Lock()
 	oldbe := s.be
 	go func() {
+		plog.Info("closing old backend...")
+		defer plog.Info("finished closing old backend")
+
 		if err := oldbe.Close(); err != nil {
 			plog.Panicf("close backend error: %v", err)
 		}
@@ -650,36 +661,51 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
 	s.bemu.Unlock()
 
 	if s.lessor != nil {
+		plog.Info("recovering lessor...")
 		s.lessor.Recover(newbe, s.kv)
+		plog.Info("finished recovering lessor")
 	}
 
+	plog.Info("recovering alarms...")
 	if err := s.restoreAlarms(); err != nil {
 		plog.Panicf("restore alarms error: %v", err)
 	}
+	plog.Info("finished recovering alarms")
 
 	if s.authStore != nil {
+		plog.Info("recovering auth store...")
 		s.authStore.Recover(newbe)
+		plog.Info("finished recovering auth store")
 	}
 
+	plog.Info("recovering store v2...")
 	if err := s.store.Recovery(apply.snapshot.Data); err != nil {
 		plog.Panicf("recovery store error: %v", err)
 	}
+	plog.Info("finished recovering store v2")
+
 	s.cluster.SetBackend(s.be)
+	plog.Info("recovering cluster configuration...")
 	s.cluster.Recover()
+	plog.Info("finished recovering cluster configuration")
 
+	plog.Info("removing old peers from network...")
 	// recover raft transport
 	s.r.transport.RemoveAllPeers()
+	plog.Info("finished removing old peers from network")
+
+	plog.Info("adding peers from new cluster configuration into network...")
 	for _, m := range s.cluster.Members() {
 		if m.ID == s.ID() {
 			continue
 		}
 		s.r.transport.AddPeer(m.ID, m.PeerURLs)
 	}
+ plog.Info("finished adding peers from new cluster configuration into network...")
 
 	ep.appliedi = apply.snapshot.Metadata.Index
 	ep.snapi = ep.appliedi
 	ep.confState = apply.snapshot.Metadata.ConfState
-	plog.Infof("recovered from incoming snapshot at index %d", ep.snapi)
 }
 
 func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
@@ -1075,21 +1101,16 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
 			plog.Panicf("nodeID should always be equal to member ID")
 		}
 		s.cluster.AddMember(m)
-		if m.ID == s.id {
-			plog.Noticef("added local member %s %v to cluster %s", m.ID, m.PeerURLs, s.cluster.ID())
-		} else {
+		if m.ID != s.id {
 			s.r.transport.AddPeer(m.ID, m.PeerURLs)
-			plog.Noticef("added member %s %v to cluster %s", m.ID, m.PeerURLs, s.cluster.ID())
 		}
 	case raftpb.ConfChangeRemoveNode:
 		id := types.ID(cc.NodeID)
 		s.cluster.RemoveMember(id)
 		if id == s.id {
 			return true, nil
-		} else {
-			s.r.transport.RemovePeer(id)
-			plog.Noticef("removed member %s from cluster %s", id, s.cluster.ID())
 		}
+		s.r.transport.RemovePeer(id)
 	case raftpb.ConfChangeUpdateNode:
 		m := new(membership.Member)
 		if err := json.Unmarshal(cc.Context, m); err != nil {
@@ -1099,11 +1120,8 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
 			plog.Panicf("nodeID should always be equal to member ID")
 		}
 		s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
-		if m.ID == s.id {
-			plog.Noticef("update local member %s %v in cluster %s", m.ID, m.PeerURLs, s.cluster.ID())
-		} else {
+		if m.ID != s.id {
 			s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
-			plog.Noticef("update member %s %v in cluster %s", m.ID, m.PeerURLs, s.cluster.ID())
 		}
 	}
 	return false, nil
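
Note on the logging pattern above: each recovery step gets a paired "<step>..." / "finished <step>" message, and defer is used where the finish message must fire on every return path (the snapshot-level message and the backend-close goroutine); the per-step messages need no defer because the intervening code panics rather than returns on error. A minimal self-contained sketch of that pattern, using the standard library log package rather than etcd's plog (the logStep helper is hypothetical, not etcd API):

package main

import "log"

// logStep logs "<name>..." immediately and returns a function that logs
// "finished <name>" when invoked; pair it with defer to cover all return paths.
func logStep(name string) func() {
	log.Printf("%s...", name)
	return func() { log.Printf("finished %s", name) }
}

func restoreStore() {
	defer logStep("restoring mvcc store")()
	// ... restore work happens here ...
}

func main() {
	restoreStore()
}

The helper call runs immediately and logs the start message, while the closure it returns is what defer actually runs, so one helper emits both halves of the pair.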