Skip to content

Commit 5cf92a8

Browse files
enjoy-binbinzuiderkwasthpatromadolson
committed
Fix replica can't finish failover when config epoch is outdated (#2178)
When the primary changes the config epoch and then down immediately, the replica may not update the config epoch in time. Although we will broadcast the change in cluster (see #1813), there may be a race in the network or in the code. In this case, the replica will never finish the failover since other primaries will refuse to vote because the replica's slot config epoch is old. We need a way to allow the replica can finish the failover in this case. When the primary refuses to vote because the replica's config epoch is less than the dead primary's config epoch, it can send an UPDATE packet to the replica to inform the replica about the dead primary. The UPDATE message contains information about the dead primary's config epoch and owned slots. The failover will time out, but later the replica can try again with the updated config epoch and it can succeed. Fixes #2169. --------- Signed-off-by: Binbin <[email protected]> Signed-off-by: Harkrishn Patro <[email protected]> Co-authored-by: Viktor Söderqvist <[email protected]> Co-authored-by: Harkrishn Patro <[email protected]> Co-authored-by: Madelyn Olson <[email protected]>
1 parent 6174112 commit 5cf92a8

File tree

3 files changed

+119
-3
lines changed

3 files changed

+119
-3
lines changed

src/cluster_legacy.c

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3674,9 +3674,10 @@ int clusterProcessPacket(clusterLink *link) {
36743674
if (server.cluster->slots[j] == sender || isSlotUnclaimed(j)) continue;
36753675
if (server.cluster->slots[j]->configEpoch > sender_claimed_config_epoch) {
36763676
serverLog(LL_VERBOSE,
3677-
"Node %.40s has old slots configuration, sending "
3678-
"an UPDATE message about %.40s",
3679-
sender->name, server.cluster->slots[j]->name);
3677+
"Node %.40s (%s) has old slots configuration, sending "
3678+
"an UPDATE message about %.40s (%s)",
3679+
sender->name, sender->human_nodename,
3680+
server.cluster->slots[j]->name, server.cluster->slots[j]->human_nodename);
36803681
clusterSendUpdate(sender->link, server.cluster->slots[j]);
36813682

36823683
/* TODO: instead of exiting the loop send every other
@@ -4622,6 +4623,16 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) {
46224623
"slot %d epoch (%llu) > reqConfigEpoch (%llu)",
46234624
node->name, node->human_nodename, j, (unsigned long long)server.cluster->slots[j]->configEpoch,
46244625
(unsigned long long)requestConfigEpoch);
4626+
4627+
/* Send an UPDATE message to the replica. After receiving the UPDATE message,
4628+
* the replica will update the slots config so that it can initiate a failover
4629+
* again later. Otherwise the replica will never get votes if the primary is down. */
4630+
serverLog(LL_VERBOSE,
4631+
"Node %.40s (%s) has old slots configuration, sending "
4632+
"an UPDATE message about %.40s (%s)",
4633+
node->name, node->human_nodename,
4634+
server.cluster->slots[j]->name, server.cluster->slots[j]->human_nodename);
4635+
clusterSendUpdate(node->link, server.cluster->slots[j]);
46254636
return;
46264637
}
46274638

tests/support/cluster_util.tcl

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,12 @@ proc cluster_allocate_replicas {masters replicas} {
210210
# Setup method to be executed to configure the cluster before the
211211
# tests run.
212212
proc cluster_setup {masters replicas node_count slot_allocator replica_allocator code} {
213+
set config_epoch 1
214+
for {set i 0} {$i < $node_count} {incr i} {
215+
R $i CLUSTER SET-CONFIG-EPOCH $config_epoch
216+
incr config_epoch
217+
}
218+
213219
# Have all nodes meet
214220
if {$::tls} {
215221
set tls_cluster [lindex [R 0 CONFIG GET tls-cluster] 1]

tests/unit/cluster/failover2.tcl

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -133,3 +133,102 @@ run_solo {cluster} {
133133
}
134134
} ;# start_cluster
135135
} ;# run_solo
136+
137+
# Needs to run in the body of
138+
# start_cluster 3 1 {tags {external:skip cluster} overrides {cluster-replica-validity-factor 0}}
139+
proc test_replica_config_epoch_failover {type} {
140+
test "Replica can update the config epoch when trigger the failover - $type" {
141+
set CLUSTER_PACKET_TYPE_NONE -1
142+
set CLUSTER_PACKET_TYPE_ALL -2
143+
144+
if {$type == "automatic"} {
145+
R 3 CONFIG SET cluster-replica-no-failover no
146+
} elseif {$type == "manual"} {
147+
R 3 CONFIG SET cluster-replica-no-failover yes
148+
}
149+
R 3 DEBUG DROP-CLUSTER-PACKET-FILTER $CLUSTER_PACKET_TYPE_ALL
150+
151+
set R0_nodeid [R 0 cluster myid]
152+
153+
# R 0 is the first node, so we expect its epoch to be the smallest,
154+
# so bumpepoch must succeed and its config epoch will be changed.
155+
set res [R 0 cluster bumpepoch]
156+
assert_match {BUMPED *} $res
157+
set R0_config_epoch [lindex $res 1]
158+
159+
# Wait for the config epoch to propagate across the cluster.
160+
wait_for_condition 1000 10 {
161+
$R0_config_epoch == [dict get [cluster_get_node_by_id 1 $R0_nodeid] config_epoch] &&
162+
$R0_config_epoch == [dict get [cluster_get_node_by_id 2 $R0_nodeid] config_epoch]
163+
} else {
164+
fail "Other primaries does not update config epoch"
165+
}
166+
# Make sure that the replica does not update the config epoch.
167+
assert_not_equal $R0_config_epoch [dict get [cluster_get_node_by_id 3 $R0_nodeid] config_epoch]
168+
169+
# Pause the R 0 and wait for the cluster to be down.
170+
pause_process [srv 0 pid]
171+
R 3 DEBUG DROP-CLUSTER-PACKET-FILTER $CLUSTER_PACKET_TYPE_NONE
172+
wait_for_condition 1000 50 {
173+
[CI 1 cluster_state] == "fail" &&
174+
[CI 2 cluster_state] == "fail" &&
175+
[CI 3 cluster_state] == "fail"
176+
} else {
177+
fail "Cluster does not fail"
178+
}
179+
180+
# Make sure both the automatic and the manual failover will fail in the first time.
181+
if {$type == "automatic"} {
182+
wait_for_log_messages -3 {"*Failover attempt expired*"} 0 1000 10
183+
} elseif {$type == "manual"} {
184+
R 3 cluster failover force
185+
wait_for_log_messages -3 {"*Manual failover timed out*"} 0 1000 10
186+
}
187+
188+
# Make sure the primaries print the relevant logs.
189+
wait_for_log_messages -1 {"*Failover auth denied to* epoch * > reqConfigEpoch*"} 0 1000 10
190+
wait_for_log_messages -1 {"*has old slots configuration, sending an UPDATE message about*"} 0 1000 10
191+
wait_for_log_messages -2 {"*Failover auth denied to* epoch * > reqConfigEpoch*"} 0 1000 10
192+
wait_for_log_messages -2 {"*has old slots configuration, sending an UPDATE message about*"} 0 1000 10
193+
194+
# Make sure the replica has updated the config epoch.
195+
wait_for_condition 1000 10 {
196+
$R0_config_epoch == [dict get [cluster_get_node_by_id 1 $R0_nodeid] config_epoch]
197+
} else {
198+
fail "The replica does not update the config epoch"
199+
}
200+
201+
if {$type == "manual"} {
202+
# The second manual failover will succeed because the config epoch
203+
# has already propagated.
204+
R 3 cluster failover force
205+
}
206+
207+
# Wait for the failover to succeed.
208+
wait_for_condition 1000 50 {
209+
[s -3 role] == "master" &&
210+
[CI 1 cluster_state] == "ok" &&
211+
[CI 2 cluster_state] == "ok" &&
212+
[CI 3 cluster_state] == "ok"
213+
} else {
214+
fail "Failover does not happen"
215+
}
216+
217+
# Restore the old primary, make sure it can convert
218+
resume_process [srv 0 pid]
219+
wait_for_condition 1000 50 {
220+
[s 0 role] == "slave" &&
221+
[CI 0 cluster_state] == "ok"
222+
} else {
223+
fail "The old primary was not converted into replica"
224+
}
225+
}
226+
}
227+
228+
start_cluster 3 1 {tags {external:skip cluster} overrides {cluster-replica-validity-factor 0}} {
229+
test_replica_config_epoch_failover "automatic"
230+
}
231+
232+
start_cluster 3 1 {tags {external:skip cluster} overrides {cluster-replica-validity-factor 0}} {
233+
test_replica_config_epoch_failover "manual"
234+
}

0 commit comments

Comments
 (0)