mirror of https://gitlab.isc.org/isc-projects/bind9
synced 2025-08-31 06:25:31 +00:00
Fix datarace when UDP/TCP connect fails and we are in nmthread
When we are in an nmthread, the isc__nm_async_<proto>connect() function executes in the same thread as isc__nm_<proto>connect(), and on failure it would block indefinitely, because the failure branch set sock->active to false before the condition around the wait had a chance to skip the WAIT(). This also fixes the zero system test getting stuck on FreeBSD 11, so the test is re-enabled in this commit.
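The race is easiest to see in a stripped-down model of the handshake. The sketch below is illustration only, not BIND code: fake_sock_t, worker_connect_failed() and caller_connect() are hypothetical names, pthread primitives stand in for the netmgr's LOCK()/SIGNAL()/WAIT() macros, and the fields mirror the sock->active, sock->result, sock->cond and sock->scond members used in the diff below.

/*
 * Illustration only -- not BIND code.  pthread calls stand in for the
 * netmgr's LOCK()/SIGNAL()/WAIT() macros; the fields mirror the
 * isc_nmsocket_t members used in the diff below.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t cond;    /* worker -> caller: result is set */
        pthread_cond_t scond;   /* caller -> worker: sock is active */
        atomic_bool active;
        int result;             /* -1 stands in for ISC_R_DEFAULT */
} fake_sock_t;

/* Worker side: the failure branch of a connect attempt. */
static void
worker_connect_failed(fake_sock_t *sock, int eresult) {
        /*
         * The bug was an atomic_store(&sock->active, false) right
         * here, before the wait below: in the same-thread case that
         * forced the wait to happen with nobody left to signal scond.
         */
        pthread_mutex_lock(&sock->lock);
        sock->result = eresult;
        pthread_cond_signal(&sock->cond);
        if (!atomic_load(&sock->active)) {
                /* Cross-thread case: park until the caller releases us. */
                pthread_cond_wait(&sock->scond, &sock->lock);
        }
        pthread_mutex_unlock(&sock->lock);
}

/* Caller side: kicks off the connect and waits for its result. */
static void
caller_connect(fake_sock_t *sock, bool in_nmthread) {
        if (in_nmthread) {
                /* Mark the socket active *before* running inline. */
                atomic_store(&sock->active, true);
                worker_connect_failed(sock, 1);
        } /* else: enqueue for a worker thread; active stays false */

        pthread_mutex_lock(&sock->lock);
        while (sock->result == -1) {
                pthread_cond_wait(&sock->cond, &sock->lock);
        }
        atomic_store(&sock->active, true);
        pthread_cond_signal(&sock->scond);
        pthread_mutex_unlock(&sock->lock);
        printf("connect done, result=%d\n", sock->result);
}

int
main(void) {
        fake_sock_t sock = { .result = -1 };

        pthread_mutex_init(&sock.lock, NULL);
        pthread_cond_init(&sock.cond, NULL);
        pthread_cond_init(&sock.scond, NULL);
        atomic_init(&sock.active, false);

        /* The nmthread case: caller and "worker" are one thread. */
        caller_connect(&sock, true);
        return (0);
}

In the cross-thread case the same code still works: active starts false, the worker parks in the scond wait, and the caller wakes it after marking the socket active, which is exactly the ordering the diff below restores.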
@@ -197,6 +197,7 @@ failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
 static isc_result_t
 tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
         isc__networker_t *worker = NULL;
+        isc_result_t result = ISC_R_DEFAULT;
         int r;
 
         REQUIRE(VALID_NMSOCK(sock));
@@ -221,7 +222,7 @@ tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
         if (r != 0) {
                 isc__nm_closesocket(sock->fd);
                 isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPENFAIL]);
-                goto failure;
+                goto done;
         }
         isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPEN]);
 
@@ -234,7 +235,7 @@ tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
                 if (r != 0 && r != UV_EINVAL) {
                         isc__nm_incstats(sock->mgr,
                                          sock->statsindex[STATID_BINDFAIL]);
-                        goto failure;
+                        goto done;
                 }
         }
 
@@ -244,20 +245,25 @@ tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
         if (r != 0) {
                 isc__nm_incstats(sock->mgr,
                                  sock->statsindex[STATID_CONNECTFAIL]);
-                goto failure;
+                goto done;
         }
         isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]);
 
         atomic_store(&sock->connected, true);
 
         return (ISC_R_SUCCESS);
+done:
+        result = isc__nm_uverr2result(r);
+
 failure:
-        atomic_store(&sock->active, false);
+        LOCK(&sock->lock);
+        sock->result = result;
+        SIGNAL(&sock->cond);
+        if (!atomic_load(&sock->active)) {
+                WAIT(&sock->scond, &sock->lock);
+        }
+        INSIST(atomic_load(&sock->active));
+        UNLOCK(&sock->lock);
 
         isc__nm_tcpdns_close(sock);
 
-        return (isc__nm_uverr2result(r));
+        return (result);
 }
 
 void
@@ -277,22 +283,12 @@ isc__nm_async_tcpdnsconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
         REQUIRE(sock->tid == isc_nm_tid());
 
         result = tcpdns_connect_direct(sock, req);
-        if (result == ISC_R_SUCCESS) {
-                atomic_store(&sock->connected, true);
-                /* The connect cb will be executed in tcpdns_connect_cb() */
-        } else {
+        if (result != ISC_R_SUCCESS) {
                 atomic_store(&sock->active, false);
                 isc__nm_tcpdns_close(sock);
                 isc__nm_uvreq_put(&req, sock);
         }
 
-        LOCK(&sock->lock);
-        sock->result = result;
-        SIGNAL(&sock->cond);
-        if (!atomic_load(&sock->active)) {
-                WAIT(&sock->scond, &sock->lock);
-        }
-        INSIST(atomic_load(&sock->active));
-        UNLOCK(&sock->lock);
-
         /*
          * The sock is now attached to the handle.
          */
@@ -377,7 +373,6 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
         sock = isc_mem_get(mgr->mctx, sizeof(*sock));
         isc__nmsocket_init(sock, mgr, isc_nm_tcpdnssocket, local);
 
-        atomic_init(&sock->active, false);
         sock->extrahandlesize = extrahandlesize;
         sock->connect_timeout = timeout;
         sock->result = ISC_R_DEFAULT;
@@ -403,6 +398,7 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
                                        (isc__netievent_t *)ievent);
                 isc__nm_put_netievent_tcpdnsconnect(mgr, ievent);
         } else {
+                atomic_init(&sock->active, false);
                 sock->tid = isc_random_uniform(mgr->nworkers);
                 isc__nm_enqueue_ievent(&mgr->workers[sock->tid],
                                        (isc__netievent_t *)ievent);
@@ -579,7 +575,7 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
         if (r < 0) {
                 isc__nm_closesocket(sock->fd);
                 isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPENFAIL]);
-                goto failure;
+                goto done;
         }
         isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPEN]);
 
@@ -592,7 +588,7 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
                          &sock->iface->addr.type.sa, flags);
         if (r < 0) {
                 isc__nm_incstats(sock->mgr, sock->statsindex[STATID_BINDFAIL]);
-                goto failure;
+                goto done;
         }
 #else
         if (sock->parent->fd == -1) {
@@ -601,7 +597,7 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
                 if (r < 0) {
                         isc__nm_incstats(sock->mgr,
                                          sock->statsindex[STATID_BINDFAIL]);
-                        goto failure;
+                        goto done;
                 }
                 sock->parent->uv_handle.tcp.flags = sock->uv_handle.tcp.flags;
                 sock->parent->fd = sock->fd;
@@ -623,12 +619,12 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
                               "uv_listen failed: %s",
                               isc_result_totext(isc__nm_uverr2result(r)));
                 isc__nm_incstats(sock->mgr, sock->statsindex[STATID_BINDFAIL]);
-                goto failure;
+                goto done;
         }
 
         atomic_store(&sock->listening, true);
 
-failure:
+done:
         result = isc__nm_uverr2result(r);
         if (result != ISC_R_SUCCESS) {
                 sock->pquota = NULL;