Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 30 additions & 1 deletion src/cluster_legacy.c
Original file line number Diff line number Diff line change
Expand Up @@ -7049,7 +7049,36 @@ int clusterCommandSpecial(client *c) {
clusterDelNode(n);
clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG);
addReply(c, shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr, "replicate") && c->argc == 3) {
} else if (!strcasecmp(c->argv[1]->ptr, "replicate") && (c->argc == 3 || c->argc == 4)) {
/* CLUSTER REPLICATE (<NODE ID> | NO ONE)*/
if (c->argc == 4) {
/* CLUSTER REPLICATE NO ONE */
if (strcasecmp(c->argv[2]->ptr, "NO") != 0 || strcasecmp(c->argv[3]->ptr, "ONE") != 0) {
addReplySubcommandSyntaxError(c);
return 1;
}
if (nodeIsPrimary(myself)) {
addReply(c, shared.ok);
return 1;
}
sds client = catClientInfoShortString(sdsempty(), c, server.hide_user_data_from_log);
serverLog(LL_NOTICE, "Stop replication and turning myself into empty primary (request from '%s').", client);
sdsfree(client);
clusterSetNodeAsPrimary(myself);
clusterPromoteSelfToPrimary();
flushAllDataAndResetRDB(server.repl_replica_lazy_flush ? EMPTYDB_ASYNC : EMPTYDB_NO_FLAGS);
clusterCloseAllSlots();
resetManualFailover();

/* Moving new primary to its own shard. */
char new_shard_id[CLUSTER_NAMELEN];
getRandomHexChars(new_shard_id, CLUSTER_NAMELEN);
updateShardId(myself, new_shard_id);

clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_BROADCAST_ALL);
addReply(c, shared.ok);
return 1;
}
/* CLUSTER REPLICATE <NODE ID> */
/* Lookup the specified node in our table. */
clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
Expand Down
20 changes: 17 additions & 3 deletions src/commands.def
Original file line number Diff line number Diff line change
Expand Up @@ -768,7 +768,9 @@ struct COMMAND_ARG CLUSTER_REPLICAS_Args[] = {

#ifndef SKIP_CMD_HISTORY_TABLE
/* CLUSTER REPLICATE history */
#define CLUSTER_REPLICATE_History NULL
/* CLUSTER REPLICATE history.
 * One entry per behavior change; rendered in the command docs.
 * NOTE(review): this table looks auto-generated from
 * commands/cluster-replicate.json — keep the wording in sync there. */
commandHistory CLUSTER_REPLICATE_History[] = {
{"9.0.0","Added support for the 'NO ONE' argument in place of <node-id>, which detaches the replica from its primary node."},
};
#endif

#ifndef SKIP_CMD_TIPS_TABLE
Expand All @@ -781,9 +783,21 @@ struct COMMAND_ARG CLUSTER_REPLICAS_Args[] = {
#define CLUSTER_REPLICATE_Keyspecs NULL
#endif

/* CLUSTER REPLICATE 'NO ONE' sub-argument table: two pure tokens
 * ("NO" then "ONE") that carry no value of their own.
 * NOTE(review): presumably auto-generated from
 * commands/cluster-replicate.json — confirm before hand-editing. */
struct COMMAND_ARG CLUSTER_REPLICATE_args_no_one_Subargs[] = {
{MAKE_ARG("no",ARG_TYPE_PURE_TOKEN,-1,"NO",NULL,NULL,CMD_ARG_NONE,0,NULL)},
{MAKE_ARG("one",ARG_TYPE_PURE_TOKEN,-1,"ONE",NULL,NULL,CMD_ARG_NONE,0,NULL)},
};

/* CLUSTER REPLICATE oneof alternatives: either a <node-id> string or the
 * 'NO ONE' token pair (added in 9.0.0, see CLUSTER_REPLICATE_History).
 * NOTE(review): presumably auto-generated from
 * commands/cluster-replicate.json — confirm before hand-editing. */
struct COMMAND_ARG CLUSTER_REPLICATE_args_Subargs[] = {
{MAKE_ARG("node-id",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
{MAKE_ARG("no-one",ARG_TYPE_BLOCK,-1,NULL,NULL,"9.0.0",CMD_ARG_NONE,2,NULL),.subargs=CLUSTER_REPLICATE_args_no_one_Subargs},
};

/* CLUSTER REPLICATE top-level argument table: a single mandatory oneof
 * ('args') whose alternatives live in CLUSTER_REPLICATE_args_Subargs.
 * NOTE(review): presumably auto-generated from
 * commands/cluster-replicate.json — confirm before hand-editing. */
struct COMMAND_ARG CLUSTER_REPLICATE_Args[] = {
{MAKE_ARG("args",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,2,NULL),.subargs=CLUSTER_REPLICATE_args_Subargs},
};

/********** CLUSTER RESET ********************/
Expand Down Expand Up @@ -1024,7 +1038,7 @@ struct COMMAND_STRUCT CLUSTER_Subcommands[] = {
{MAKE_CMD("myshardid","Returns the shard ID of a node.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYSHARDID_History,0,CLUSTER_MYSHARDID_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_MYSHARDID_Keyspecs,0,NULL,0)},
{MAKE_CMD("nodes","Returns the cluster configuration for a node.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_NODES_History,0,CLUSTER_NODES_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_NODES_Keyspecs,0,NULL,0)},
{MAKE_CMD("replicas","Lists the replica nodes of a primary node.","O(N) where N is the number of replicas.","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICAS_History,0,CLUSTER_REPLICAS_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICAS_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICAS_Args},
{MAKE_CMD("replicate","Configure a node as replica of a primary node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICATE_History,0,CLUSTER_REPLICATE_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICATE_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICATE_Args},
{MAKE_CMD("replicate","Configure a node as replica of a primary node or detach a replica from its primary.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICATE_History,1,CLUSTER_REPLICATE_Tips,0,clusterCommand,-3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICATE_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICATE_Args},
{MAKE_CMD("reset","Resets a node.","O(N) where N is the number of known nodes. The command may execute a FLUSHALL as a side effect.","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_RESET_History,0,CLUSTER_RESET_Tips,0,clusterCommand,-2,CMD_ADMIN|CMD_STALE|CMD_NOSCRIPT,0,CLUSTER_RESET_Keyspecs,0,NULL,1),.args=CLUSTER_RESET_Args},
{MAKE_CMD("saveconfig","Forces a node to save the cluster configuration to disk.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SAVECONFIG_History,0,CLUSTER_SAVECONFIG_Tips,0,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_SAVECONFIG_Keyspecs,0,NULL,0)},
{MAKE_CMD("set-config-epoch","Sets the configuration epoch for a new node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SET_CONFIG_EPOCH_History,0,CLUSTER_SET_CONFIG_EPOCH_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_SET_CONFIG_EPOCH_Keyspecs,0,NULL,1),.args=CLUSTER_SET_CONFIG_EPOCH_Args},
Expand Down
37 changes: 33 additions & 4 deletions src/commands/cluster-replicate.json
Original file line number Diff line number Diff line change
@@ -1,21 +1,50 @@
{
"REPLICATE": {
"summary": "Configure a node as replica of a primary node.",
"summary": "Configure a node as replica of a primary node or detach a replica from its primary.",
"complexity": "O(1)",
"group": "cluster",
"since": "3.0.0",
"arity": 3,
"arity": -3,
"container": "CLUSTER",
"function": "clusterCommand",
"history": [
[
"9.0.0",
"Added support for the 'NO ONE' argument in place of <node-id>, which detaches the replica from its primary node."
]
],
"command_flags": [
"NO_ASYNC_LOADING",
"ADMIN",
"STALE"
],
"arguments": [
{
"name": "node-id",
"type": "string"
"name": "args",
"type": "oneof",
"arguments": [
{
"name": "node-id",
"type": "string"
},
{
"name": "no-one",
"type": "block",
"since": "9.0.0",
"arguments": [
{
"name": "no",
"type": "pure-token",
"token": "NO"
},
{
"name": "one",
"type": "pure-token",
"token": "ONE"
}
]
}
]
}
],
"reply_schema": {
Expand Down
85 changes: 85 additions & 0 deletions tests/unit/cluster/replica-detach.tcl
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
# Return the slot ranges owned by the shard that contains the node with id
# $node_id_to_find, as reported by node $node_idx via CLUSTER SHARDS.
# Returns an empty list when no shard lists that node id.
proc find_slots_for_node {node_idx node_id_to_find} {
    foreach shard_cfg [R $node_idx CLUSTER SHARDS] {
        foreach node [dict get $shard_cfg nodes] {
            if {[dict get $node id] eq $node_id_to_find} {
                return [dict get $shard_cfg slots]
            }
        }
    }
    return {}
}

# Locate the primary whose CLUSTER MYSHARDID equals $shard_id.
# Primaries occupy indexes [0 .. $number_of_shards-1]; fail the test if no
# primary reports the requested shard id.
proc find_primary_node {number_of_shards shard_id} {
    set idx 0
    while {$idx < $number_of_shards} {
        if {[R $idx CLUSTER MYSHARDID] eq $shard_id} {
            return $idx
        }
        incr idx
    }
    fail "Can't find shard $shard_id across $number_of_shards primaries."
}

# End-to-end test for CLUSTER REPLICATE NO ONE: detach a replica from its
# primary and verify it becomes an empty primary in its own new shard.
set number_of_shards 3
start_cluster $number_of_shards $number_of_shards {tags {external:skip cluster}} {
    test "Cluster should start ok" {
        wait_for_cluster_state ok
    }

    test "CLUSTER REPLICATE NO ONE should turn node into empty primary" {
        upvar number_of_shards number_of_shards
        # primary indexes: [0..$number_of_shards-1]
        # replica indexes: [$number_of_shards..2*$number_of_shards-1]
        # We are just taking first replica index for testing.
        set replica_node $number_of_shards
        set replica_node_id [R $replica_node CLUSTER MYID]
        set shard_id [R $replica_node CLUSTER MYSHARDID]
        # Make sure the node has actually assumed the replica role before we
        # exercise the detach path.
        wait_for_condition 100 100 {
            [string first "slave" [R $replica_node ROLE]] >= 0
        } else {
            puts "R $replica_node ROLE: [R $replica_node ROLE]"
            fail "R $replica_node didn't assume replica role in time"
        }
        # Add some data to the replica's shard so we can later verify that the
        # detached node has been flushed (DBSIZE drops to 0).
        set primary_node [find_primary_node $number_of_shards $shard_id]
        wait_for_condition 100 100 {
            ([catch {R $primary_node SET [randomKey] "some-value"} resp] == 0) && ($resp eq {OK})
        } else {
            puts "R $primary_node SCAN 0: [R $primary_node SCAN 0]"
            fail "Failed to add some key to primary node $primary_node in shard $shard_id."
        }
        # ... and wait for the data to be replicated to the replica.
        wait_for_condition 100 100 {
            [R $replica_node DBSIZE] > 0
        } else {
            puts "R $replica_node DBSIZE: [R $replica_node DBSIZE]"
            fail "Replica $replica_node didn't replicate data from primary."
        }

        assert_equal "OK" [R $replica_node CLUSTER REPLICATE NO ONE]

        # The detached node must report itself as a primary.
        wait_for_condition 100 100 {
            [string first "master" [R $replica_node ROLE]] >= 0
        } else {
            puts "R $replica_node ROLE: [R $replica_node ROLE]"
            fail "R $replica_node didn't assume primary role in time"
        }

        # The node must have moved to a brand-new shard and start out empty.
        assert_not_equal $shard_id [R $replica_node CLUSTER MYSHARDID]
        assert_equal 0 [R $replica_node DBSIZE]

        # Check on every node in the cluster that the new empty primary owns
        # no slots. Iterate all 2*$number_of_shards nodes instead of
        # hard-coding the indexes so the test stays correct if
        # $number_of_shards is ever changed.
        for {set node_idx 0} {$node_idx < 2 * $number_of_shards} {incr node_idx} {
            wait_for_condition 100 100 {
                [llength [find_slots_for_node $node_idx $replica_node_id]] == 0
            } else {
                puts "R $node_idx still returns node $replica_node_id owning slots: [find_slots_for_node $node_idx $replica_node_id]"
                fail "R $node_idx didn't refresh topology after detaching replica $replica_node/$replica_node_id"
            }
        }
    }
}
Loading