
ovsdb: raft: Fix inability to join the cluster after interrupted attempt.

If the joining server re-connects while catching up (e.g. if it crashed
or the connection got closed due to inactivity), the data we sent might
be lost, so the server will never reply to the append request or the
snapshot installation request.  At the same time, the leader will decline
all subsequent requests to join from that server with the 'in progress'
resolution.  At this point the new server will never be able to join the
cluster, because it will never receive the raft log while the leader
thinks that it was already sent.

This happened in practice when one of the servers got preempted for a
few seconds, so the leader closed the connection due to inactivity.

Fix that by destroying the record of the joining server when a
disconnection is detected.  This allows the join to start over from
scratch when the server re-connects and sends a new join request.

We can't track the re-connection in raft_conn_run(), because it's an
incoming connection and jsonrpc will not keep it alive or try to
reconnect.  The next time the server re-connects it will be an
entirely new raft conn.

Fixes: 1b1d2e6daa ("ovsdb: Introduce experimental support for clustered databases.")
Reported-at: https://bugzilla.redhat.com/2033514
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
Acked-by: Dumitru Ceara <dceara@redhat.com>
Author: Ilya Maximets
Date:   2022-01-28 19:51:21 +01:00
commit 999ba294fb
parent 6de8868d19
2 changed files with 86 additions and 7 deletions
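
The core of the change is easier to see in isolation.  The following is a
self-contained toy model (plain C, compilable on its own) of the behavior
described in the commit message; the names (joiner, handle_join_request,
handle_disconnect) and the fixed-size table are invented for illustration
and do not exist in ovsdb/raft.c.  It shows why a stuck joiner kept being
declined with 'in progress', and how forgetting the join record on
disconnect lets the next join request start over.

/*
 * Toy model of the join deadlock and the fix.  This is NOT the actual
 * ovsdb/raft.c code; all names here are illustrative.  The real patch
 * keeps joining-server records in raft->add_servers keyed by server ID
 * and destroys the matching record in raft_run()'s cleanup loop.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_JOINERS 4

struct joiner {
    char sid[32];       /* ID of a server that is still joining. */
    bool in_use;
};

static struct joiner add_servers[MAX_JOINERS];

/* Finds the record of a still-joining server, or NULL. */
static struct joiner *
find_new_server(const char *sid)
{
    for (int i = 0; i < MAX_JOINERS; i++) {
        if (add_servers[i].in_use && !strcmp(add_servers[i].sid, sid)) {
            return &add_servers[i];
        }
    }
    return NULL;
}

/* Leader-side handling of a join request.  If a record for 'sid' already
 * exists, the request is declined as "in progress" -- this is what kept a
 * re-connecting server out of the cluster forever before the fix. */
static const char *
handle_join_request(const char *sid)
{
    if (find_new_server(sid)) {
        return "in progress";
    }
    for (int i = 0; i < MAX_JOINERS; i++) {
        if (!add_servers[i].in_use) {
            add_servers[i].in_use = true;
            snprintf(add_servers[i].sid, sizeof add_servers[i].sid, "%s", sid);
            return "catching up";   /* Leader starts sending the log. */
        }
    }
    return "full";
}

/* The fix: when the joining server's only connection closes, drop its
 * record so that the next join request starts the process from scratch. */
static void
handle_disconnect(const char *sid)
{
    struct joiner *j = find_new_server(sid);
    if (j) {
        printf("server %s disconnected while joining\n", sid);
        j->in_use = false;
    }
}

int
main(void)
{
    printf("%s\n", handle_join_request("s2"));  /* "catching up" */
    printf("%s\n", handle_join_request("s2"));  /* "in progress" (stuck) */
    handle_disconnect("s2");                    /* the fix: forget s2 */
    printf("%s\n", handle_join_request("s2"));  /* "catching up" again */
    return 0;
}

The actual patch below applies the same idea inside raft_run()'s
connection-cleanup loop, using raft_find_new_server() to look up the
record by the connection's server ID, then hmap_remove() on
raft->add_servers and raft_server_destroy().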

ovsdb/raft.c

@@ -74,6 +74,7 @@ enum raft_failure_test {
     FT_CRASH_BEFORE_SEND_EXEC_REQ,
     FT_CRASH_AFTER_SEND_EXEC_REQ,
     FT_CRASH_AFTER_RECV_APPEND_REQ_UPDATE,
+    FT_CRASH_BEFORE_SEND_SNAPSHOT_REP,
     FT_DELAY_ELECTION,
     FT_DONT_SEND_VOTE_REQUEST,
     FT_STOP_RAFT_RPC,
@@ -379,12 +380,19 @@ static bool raft_handle_write_error(struct raft *, struct ovsdb_error *);
 static void raft_run_reconfigure(struct raft *);
 static void raft_set_leader(struct raft *, const struct uuid *sid);
 
 static struct raft_server *
 raft_find_server(const struct raft *raft, const struct uuid *sid)
 {
     return raft_server_find(&raft->servers, sid);
 }
 
+static struct raft_server *
+raft_find_new_server(struct raft *raft, const struct uuid *uuid)
+{
+    return raft_server_find(&raft->add_servers, uuid);
+}
+
 static char *
 raft_make_address_passive(const char *address_)
 {
@@ -1867,6 +1875,8 @@ raft_open_conn(struct raft *raft, const char *address, const struct uuid *sid)
 static void
 raft_conn_close(struct raft_conn *conn)
 {
+    VLOG_DBG("closing connection to server %s (%s)",
+             conn->nickname, jsonrpc_session_get_name(conn->js));
     jsonrpc_session_close(conn->js);
     ovs_list_remove(&conn->list_node);
     free(conn->nickname);
@@ -1957,16 +1967,30 @@ raft_run(struct raft *raft)
     }
 
     /* Close unneeded sessions. */
+    struct raft_server *server;
     struct raft_conn *next;
     LIST_FOR_EACH_SAFE (conn, next, list_node, &raft->conns) {
         if (!raft_conn_should_stay_open(raft, conn)) {
+            server = raft_find_new_server(raft, &conn->sid);
+            if (server) {
+                /* We only have one incoming connection from joining servers,
+                 * so if it's closed, we need to destroy the record about the
+                 * server.  This way the process can be started over on the
+                 * next join request. */
+                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+                VLOG_INFO_RL(&rl, "cluster "CID_FMT": server %s (%s) "
+                                  "disconnected while joining",
+                                  CID_ARGS(&raft->cid),
+                                  server->nickname, server->address);
+                hmap_remove(&raft->add_servers, &server->hmap_node);
+                raft_server_destroy(server);
+            }
             raft->n_disconnections++;
             raft_conn_close(conn);
         }
     }
 
     /* Open needed sessions. */
-    struct raft_server *server;
     HMAP_FOR_EACH (server, hmap_node, &raft->servers) {
         raft_open_conn(raft, server->address, &server->sid);
     }
@@ -3354,12 +3378,6 @@ raft_find_peer(struct raft *raft, const struct uuid *uuid)
     return s && !uuid_equals(&raft->sid, &s->sid) ? s : NULL;
 }
 
-static struct raft_server *
-raft_find_new_server(struct raft *raft, const struct uuid *uuid)
-{
-    return raft_server_find(&raft->add_servers, uuid);
-}
-
 /* Figure 3.1: "If there exists an N such that N > commitIndex, a
  * majority of matchIndex[i] >= N, and log[N].term == currentTerm, set
  * commitIndex = N (sections 3.5 and 3.6)." */
@@ -4142,6 +4160,10 @@ static void
 raft_handle_install_snapshot_request(
     struct raft *raft, const struct raft_install_snapshot_request *rq)
 {
+    if (failure_test == FT_CRASH_BEFORE_SEND_SNAPSHOT_REP) {
+        ovs_fatal(0, "Raft test: crash before sending install_snapshot_reply");
+    }
+
     if (raft_handle_install_snapshot_request__(raft, rq)) {
         union raft_rpc rpy = {
             .install_snapshot_reply = {
@@ -4940,6 +4962,8 @@ raft_unixctl_failure_test(struct unixctl_conn *conn OVS_UNUSED,
         failure_test = FT_CRASH_AFTER_SEND_EXEC_REQ;
     } else if (!strcmp(test, "crash-after-receiving-append-request-update")) {
         failure_test = FT_CRASH_AFTER_RECV_APPEND_REQ_UPDATE;
+    } else if (!strcmp(test, "crash-before-sending-install-snapshot-reply")) {
+        failure_test = FT_CRASH_BEFORE_SEND_SNAPSHOT_REP;
     } else if (!strcmp(test, "delay-election")) {
         failure_test = FT_DELAY_ELECTION;
         struct raft *raft;

tests/ovsdb-cluster.at

@@ -400,6 +400,61 @@ done
 
 AT_CLEANUP
 
+AT_BANNER([OVSDB - cluster failure while joining])
+
+AT_SETUP([OVSDB cluster - follower crash while joining])
+AT_KEYWORDS([ovsdb server negative unix cluster join])
+
+n=3
+schema_name=`ovsdb-tool schema-name $abs_srcdir/idltest.ovsschema`
+ordinal_schema > schema
+AT_CHECK([ovsdb-tool '-vPATTERN:console:%c|%p|%m' create-cluster s1.db dnl
+          $abs_srcdir/idltest.ovsschema unix:s1.raft], [0], [], [stderr])
+cid=`ovsdb-tool db-cid s1.db`
+schema_name=`ovsdb-tool schema-name $abs_srcdir/idltest.ovsschema`
+for i in `seq 2 $n`; do
+    AT_CHECK([ovsdb-tool join-cluster s$i.db $schema_name unix:s$i.raft unix:s1.raft])
+done
+
+on_exit 'kill `cat *.pid`'
+
+dnl Starting followers first, so we can configure them to crash on join.
+for j in `seq $n`; do
+    i=$(($n + 1 - $j))
+    AT_CHECK([ovsdb-server -v -vconsole:off -vsyslog:off dnl
+              --detach --no-chdir --log-file=s$i.log dnl
+              --pidfile=s$i.pid --unixctl=s$i dnl
+              --remote=punix:s$i.ovsdb s$i.db])
+    if test $i != 1; then
+        OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/s$i dnl
+            cluster/failure-test crash-before-sending-install-snapshot-reply dnl
+            | grep -q "engaged"])
+    fi
+done
+
+dnl Make sure that followers really crashed.
+for i in `seq 2 $n`; do
+    OVS_WAIT_WHILE([test -s s$i.pid])
+done
+
+dnl Bring them back.
+for i in `seq 2 $n`; do
+    AT_CHECK([ovsdb-server -v -vconsole:off -vsyslog:off dnl
+              --detach --no-chdir --log-file=s$i.log dnl
+              --pidfile=s$i.pid --unixctl=s$i dnl
+              --remote=punix:s$i.ovsdb s$i.db])
+done
+
+dnl Make sure that all servers joined the cluster.
+for i in `seq $n`; do
+    AT_CHECK([ovsdb_client_wait unix:s$i.ovsdb $schema_name connected])
+done
+
+for i in `seq $n`; do
+    OVS_APP_EXIT_AND_WAIT_BY_TARGET([`pwd`/s$i], [s$i.pid])
+done
+
+AT_CLEANUP
+
 OVS_START_SHELL_HELPERS