diff --git a/CHANGES b/CHANGES
index 2eaf8c2e7a..8c01f7826c 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,6 @@
+5543.	[bug]		Restore the UDP performance after the user netmgr
+			callbacks have been made asynchronous. [GL #2320]
+
 5542.	[bug]		Refactor the netmgr. [GL #1920] [GL #2034] [GL #2061]
 			[GL #2194] [GL #2266] [GL #2283] [GL #2318]
 			[GL #2321] [GL #2221]
diff --git a/lib/isc/netmgr/netmgr-int.h b/lib/isc/netmgr/netmgr-int.h
index df00fa1741..fe7fc7bd9b 100644
--- a/lib/isc/netmgr/netmgr-int.h
+++ b/lib/isc/netmgr/netmgr-int.h
@@ -607,10 +607,10 @@ struct isc_nm {
 	 * milliseconds so they can be used directly with the libuv timer,
 	 * but they are configured in tenths of seconds.
 	 */
-	uint32_t init;
-	uint32_t idle;
-	uint32_t keepalive;
-	uint32_t advertised;
+	atomic_uint_fast32_t init;
+	atomic_uint_fast32_t idle;
+	atomic_uint_fast32_t keepalive;
+	atomic_uint_fast32_t advertised;
 
 #ifdef NETMGR_TRACE
 	ISC_LIST(isc_nmsocket_t) active_sockets;
diff --git a/lib/isc/netmgr/netmgr.c b/lib/isc/netmgr/netmgr.c
index 8ac57b61f3..9feae4f8a4 100644
--- a/lib/isc/netmgr/netmgr.c
+++ b/lib/isc/netmgr/netmgr.c
@@ -218,10 +218,10 @@ isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
 	 * Default TCP timeout values.
	 * May be updated by isc_nm_tcptimeouts().
	 */
-	mgr->init = 30000;
-	mgr->idle = 30000;
-	mgr->keepalive = 30000;
-	mgr->advertised = 30000;
+	atomic_init(&mgr->init, 30000);
+	atomic_init(&mgr->idle, 30000);
+	atomic_init(&mgr->keepalive, 30000);
+	atomic_init(&mgr->advertised, 30000);
 
 	isc_mutex_init(&mgr->reqlock);
 	isc_mempool_create(mgr->mctx, sizeof(isc__nm_uvreq_t), &mgr->reqpool);
@@ -486,10 +486,10 @@ isc_nm_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
 		   uint32_t keepalive, uint32_t advertised) {
 	REQUIRE(VALID_NM(mgr));
 
-	mgr->init = init * 100;
-	mgr->idle = idle * 100;
-	mgr->keepalive = keepalive * 100;
-	mgr->advertised = advertised * 100;
+	atomic_store(&mgr->init, init * 100);
+	atomic_store(&mgr->idle, idle * 100);
+	atomic_store(&mgr->keepalive, keepalive * 100);
+	atomic_store(&mgr->advertised, advertised * 100);
 }
 
 void
@@ -498,19 +498,19 @@ isc_nm_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
 	REQUIRE(VALID_NM(mgr));
 
 	if (initial != NULL) {
-		*initial = mgr->init / 100;
+		*initial = atomic_load(&mgr->init) / 100;
 	}
 
 	if (idle != NULL) {
-		*idle = mgr->idle / 100;
+		*idle = atomic_load(&mgr->idle) / 100;
 	}
 
 	if (keepalive != NULL) {
-		*keepalive = mgr->keepalive / 100;
+		*keepalive = atomic_load(&mgr->keepalive) / 100;
 	}
 
 	if (advertised != NULL) {
-		*advertised = mgr->advertised / 100;
+		*advertised = atomic_load(&mgr->advertised) / 100;
 	}
 }
 
@@ -1784,13 +1784,15 @@ isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
 	REQUIRE(VALID_UVREQ(uvreq));
 	REQUIRE(VALID_NMHANDLE(uvreq->handle));
 
-	isc__netievent_connectcb_t *ievent = isc__nm_get_netievent_connectcb(
-		sock->mgr, sock, uvreq, eresult);
-
 	if (eresult == ISC_R_SUCCESS) {
-		isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-					     (isc__netievent_t *)ievent);
+		isc__netievent_connectcb_t ievent = { .sock = sock,
+						      .req = uvreq,
+						      .result = eresult };
+		isc__nm_async_connectcb(NULL, (isc__netievent_t *)&ievent);
 	} else {
+		isc__netievent_connectcb_t *ievent =
+			isc__nm_get_netievent_connectcb(sock->mgr, sock, uvreq,
+							eresult);
 		isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
 				       (isc__netievent_t *)ievent);
 	}
@@ -1823,14 +1825,15 @@ isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
 	REQUIRE(VALID_UVREQ(uvreq));
 	REQUIRE(VALID_NMHANDLE(uvreq->handle));
 
-	isc__netievent_readcb_t *ievent =
-		isc__nm_get_netievent_readcb(sock->mgr, sock, uvreq, eresult);
-
 	if (eresult == ISC_R_SUCCESS) {
-		REQUIRE(sock->tid == isc_nm_tid());
-		isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-					     (isc__netievent_t *)ievent);
+		isc__netievent_readcb_t ievent = { .sock = sock,
+						   .req = uvreq,
+						   .result = eresult };
+
+		isc__nm_async_readcb(NULL, (isc__netievent_t *)&ievent);
 	} else {
+		isc__netievent_readcb_t *ievent = isc__nm_get_netievent_readcb(
+			sock->mgr, sock, uvreq, eresult);
 		isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
 				       (isc__netievent_t *)ievent);
 	}
@@ -1864,14 +1867,14 @@ isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
 	REQUIRE(VALID_UVREQ(uvreq));
 	REQUIRE(VALID_NMHANDLE(uvreq->handle));
 
-	isc__netievent_sendcb_t *ievent =
-		isc__nm_get_netievent_sendcb(sock->mgr, sock, uvreq, eresult);
-
 	if (eresult == ISC_R_SUCCESS) {
-		REQUIRE(sock->tid == isc_nm_tid());
-		isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-					     (isc__netievent_t *)ievent);
+		isc__netievent_sendcb_t ievent = { .sock = sock,
+						   .req = uvreq,
+						   .result = eresult };
+		isc__nm_async_sendcb(NULL, (isc__netievent_t *)&ievent);
 	} else {
+		isc__netievent_sendcb_t *ievent = isc__nm_get_netievent_sendcb(
+			sock->mgr, sock, uvreq, eresult);
 		isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
 				       (isc__netievent_t *)ievent);
 	}
diff --git a/lib/isc/netmgr/tcp.c b/lib/isc/netmgr/tcp.c
index 0433da859d..b6c25dafad 100644
--- a/lib/isc/netmgr/tcp.c
+++ b/lib/isc/netmgr/tcp.c
@@ -790,9 +790,10 @@ isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
 	sock->recv_cbarg = cbarg;
 	sock->recv_read = true;
 	if (sock->read_timeout == 0) {
-		sock->read_timeout = (atomic_load(&sock->keepalive)
-					      ? sock->mgr->keepalive
-					      : sock->mgr->idle);
+		sock->read_timeout =
+			(atomic_load(&sock->keepalive)
+				 ? atomic_load(&sock->mgr->keepalive)
+				 : atomic_load(&sock->mgr->idle));
 	}
 
 	ievent = isc__nm_get_netievent_tcpstartread(sock->mgr, sock);
@@ -959,9 +960,10 @@ read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
 	req->uvbuf.len = nread;
 
 	if (!atomic_load(&sock->client)) {
-		sock->read_timeout = (atomic_load(&sock->keepalive)
-					      ? sock->mgr->keepalive
-					      : sock->mgr->idle);
+		sock->read_timeout =
+			(atomic_load(&sock->keepalive)
+				 ? atomic_load(&sock->mgr->keepalive)
+				 : atomic_load(&sock->mgr->idle));
 	}
 
 	isc__nm_readcb(sock, req, ISC_R_SUCCESS);
@@ -1100,7 +1102,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) {
 
 	isc__nm_incstats(csock->mgr, csock->statsindex[STATID_ACCEPT]);
 
-	csock->read_timeout = csock->mgr->init;
+	csock->read_timeout = atomic_load(&csock->mgr->init);
 
 	atomic_fetch_add(&ssock->parent->active_child_connections, 1);
 
diff --git a/lib/isc/netmgr/tcpdns.c b/lib/isc/netmgr/tcpdns.c
index 8cc0c4a654..0b976ecdc5 100644
--- a/lib/isc/netmgr/tcpdns.c
+++ b/lib/isc/netmgr/tcpdns.c
@@ -846,9 +846,10 @@ isc__nm_tcpdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
 	sock->recv_cbarg = cbarg;
 	sock->recv_read = true;
 	if (sock->read_timeout == 0) {
-		sock->read_timeout = (atomic_load(&sock->keepalive)
-					      ? sock->mgr->keepalive
-					      : sock->mgr->idle);
+		sock->read_timeout =
+			(atomic_load(&sock->keepalive)
+				 ? atomic_load(&sock->mgr->keepalive)
+				 : atomic_load(&sock->mgr->idle));
 	}
 
 	ievent = isc__nm_get_netievent_tcpdnsread(sock->mgr, sock);
@@ -1037,7 +1038,7 @@ read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
 	sock->buf_len += len;
 
 	if (!atomic_load(&sock->client)) {
-		sock->read_timeout = sock->mgr->idle;
+		sock->read_timeout = atomic_load(&sock->mgr->idle);
 	}
 
 	process_sock_buffer(sock);
@@ -1182,7 +1183,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) {
 
 	isc__nm_incstats(csock->mgr, csock->statsindex[STATID_ACCEPT]);
 
-	csock->read_timeout = csock->mgr->init;
+	csock->read_timeout = atomic_load(&csock->mgr->init);
 
 	csock->closehandle_cb = resume_processing;
 
@@ -1199,8 +1200,8 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) {
 	 * reads.
	 */
 	csock->read_timeout = (atomic_load(&csock->keepalive)
-				       ? csock->mgr->keepalive
-				       : csock->mgr->idle);
+				       ? atomic_load(&csock->mgr->keepalive)
+				       : atomic_load(&csock->mgr->idle));
 
 	isc_nmhandle_detach(&handle);
 
diff --git a/lib/isc/netmgr/tls.c b/lib/isc/netmgr/tls.c
index 1d80abcf47..f685493f38 100644
--- a/lib/isc/netmgr/tls.c
+++ b/lib/isc/netmgr/tls.c
@@ -336,7 +336,7 @@ tlslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
 	isc__nmsocket_attach(tlslistensock, &tlssock->listener);
 	isc_nmhandle_attach(handle, &tlssock->outerhandle);
 	tlssock->peer = handle->sock->peer;
-	tlssock->read_timeout = handle->sock->mgr->init;
+	tlssock->read_timeout = atomic_load(&handle->sock->mgr->init);
 	tlssock->tid = isc_nm_tid();
 	tlssock->tls.server = true;
 	tlssock->tls.state = TLS_INIT;
diff --git a/lib/isc/netmgr/tlsdns.c b/lib/isc/netmgr/tlsdns.c
index b56b5ea160..7359e0f452 100644
--- a/lib/isc/netmgr/tlsdns.c
+++ b/lib/isc/netmgr/tlsdns.c
@@ -156,7 +156,7 @@ dnslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
 
 	isc_nmhandle_attach(handle, &dnssock->outerhandle);
 	dnssock->peer = handle->sock->peer;
-	dnssock->read_timeout = handle->sock->mgr->init;
+	dnssock->read_timeout = atomic_load(&handle->sock->mgr->init);
 	dnssock->tid = isc_nm_tid();
 	dnssock->closehandle_cb = resume_processing;
 
@@ -329,8 +329,8 @@ dnslisten_readcb(isc_nmhandle_t *handle, isc_result_t eresult,
 		dnssock->buf_len += len;
 
 		dnssock->read_timeout = (atomic_load(&dnssock->keepalive)
-						 ? dnssock->mgr->keepalive
-						 : dnssock->mgr->idle);
+						 ? atomic_load(&dnssock->mgr->keepalive)
+						 : atomic_load(&dnssock->mgr->idle));
 
 		do {
 			isc_result_t result;
@@ -754,7 +754,7 @@ tlsdnsconnect_cb(isc_nmhandle_t *handle, isc_result_t result, void *arg) {
 
 	isc_nmhandle_attach(handle, &dnssock->outerhandle);
 	dnssock->peer = handle->sock->peer;
-	dnssock->read_timeout = handle->sock->mgr->init;
+	dnssock->read_timeout = atomic_load(&handle->sock->mgr->init);
 	dnssock->tid = isc_nm_tid();
 
 	atomic_init(&dnssock->client, true);
@@ -852,8 +852,8 @@ isc__nm_tlsdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
 	sock->recv_cbarg = cbarg;
 
 	sock->read_timeout = (atomic_load(&sock->keepalive)
-				      ? sock->mgr->keepalive
-				      : sock->mgr->idle);
+				      ? atomic_load(&sock->mgr->keepalive)
+				      : atomic_load(&sock->mgr->idle));
 
 	/*
	 * Add a reference to the handle to keep it from being freed by
diff --git a/lib/isc/netmgr/udp.c b/lib/isc/netmgr/udp.c
index 22055c3382..726efd6f0f 100644
--- a/lib/isc/netmgr/udp.c
+++ b/lib/isc/netmgr/udp.c
@@ -454,7 +454,6 @@ isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
 	isc_nmsocket_t *sock = handle->sock;
 	isc_nmsocket_t *psock = NULL, *rsock = sock;
 	isc_sockaddr_t *peer = &handle->peer;
-	isc__netievent_udpsend_t *ievent = NULL;
 	isc__nm_uvreq_t *uvreq = NULL;
 	uint32_t maxudp = atomic_load(&sock->mgr->maxudp);
 	int ntid;
@@ -512,23 +511,14 @@ isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
 	}
 
 	if (isc_nm_tid() == rsock->tid) {
-		/*
-		 * If we're in the same thread as the socket we can send
-		 * the data directly, but we still need to return errors
-		 * via the callback for API consistency.
-		 */
-		isc_result_t result = udp_send_direct(rsock, uvreq, peer);
-		if (result != ISC_R_SUCCESS) {
-			isc__nm_incstats(rsock->mgr,
-					 rsock->statsindex[STATID_SENDFAIL]);
-			failed_send_cb(rsock, uvreq, result);
-		}
+		isc__netievent_udpsend_t ievent = { .sock = rsock,
+						    .req = uvreq,
+						    .peer = *peer };
+
+		isc__nm_async_udpsend(NULL, (isc__netievent_t *)&ievent);
 	} else {
-		/*
-		 * We need to create an event and pass it using async
-		 * channel
-		 */
-		ievent = isc__nm_get_netievent_udpsend(sock->mgr, rsock);
+		isc__netievent_udpsend_t *ievent =
+			isc__nm_get_netievent_udpsend(sock->mgr, rsock);
 		ievent->peer = *peer;
 		ievent->req = uvreq;
 
@@ -551,7 +541,7 @@ isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0) {
 	REQUIRE(sock->tid == isc_nm_tid());
 	UNUSED(worker);
 
-	if (!isc__nmsocket_active(ievent->sock)) {
+	if (inactive(sock)) {
 		failed_send_cb(sock, uvreq, ISC_R_CANCELED);
 		return;
 	}
@@ -1000,7 +990,6 @@ isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
 	REQUIRE(VALID_NMSOCK(handle->sock));
 
 	isc_nmsocket_t *sock = handle->sock;
-	isc__netievent_udpread_t *ievent = NULL;
 
 	REQUIRE(sock->type == isc_nm_udpsocket);
 	REQUIRE(sock->statichandle == handle);
@@ -1011,14 +1000,14 @@ isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
 	sock->recv_cbarg = cbarg;
 	sock->recv_read = true;
 
-	ievent = isc__nm_get_netievent_udpread(sock->mgr, sock);
 
-	if (sock->reading) {
+	if (!sock->reading && sock->tid == isc_nm_tid()) {
+		isc__netievent_udpread_t ievent = { .sock = sock };
+		isc__nm_async_udpread(NULL, (isc__netievent_t *)&ievent);
+	} else {
+		isc__netievent_udpread_t *ievent =
+			isc__nm_get_netievent_udpread(sock->mgr, sock);
 		isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
 				       (isc__netievent_t *)ievent);
-	} else {
-		isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-					     (isc__netievent_t *)ievent);
 	}
 }
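
Note: the recurring shape of the netmgr.c and udp.c changes above is a same-thread fast
path. When the caller is already on the socket's worker thread (and, for the callback
paths, the operation succeeded), the netievent is built on the stack and the matching
isc__nm_async_*() handler is invoked directly, instead of allocating an event from the
manager and pushing it through the worker's async queue; the cross-thread and failure
branches keep the heap-allocated event and the queue so the callback still fires on the
owning thread. The stand-alone C program below is a minimal sketch of that dispatch
pattern only; it is not the libisc netmgr API, and every identifier in it (ievent_t,
async_cb, enqueue_for_worker, dispatch) is a hypothetical stand-in.

/*
 * same_thread_fastpath.c: sketch of "run the async handler synchronously
 * when already on the owning thread; otherwise allocate and enqueue".
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct ievent {
	int sock_tid; /* thread that owns the socket */
	int result;   /* operation result to report */
} ievent_t;

/* Handler that must run on the socket's owning worker thread. */
static void
async_cb(const ievent_t *ev) {
	printf("callback on tid %d, result %d\n", ev->sock_tid, ev->result);
}

/*
 * Slow-path stand-in: a real implementation would push the heap event
 * onto the owning worker's async queue (e.g. waking it with
 * uv_async_send()); here we just run the handler and free the event to
 * keep the sketch short.
 */
static void
enqueue_for_worker(ievent_t *ev) {
	async_cb(ev);
	free(ev);
}

static void
dispatch(int my_tid, int sock_tid, int result) {
	if (result == 0 && my_tid == sock_tid) {
		/* Fast path: stack event, direct call, no allocation. */
		ievent_t ev = { .sock_tid = sock_tid, .result = result };
		async_cb(&ev);
	} else {
		/* Cross-thread or failure: allocate and enqueue as before. */
		ievent_t *ev = malloc(sizeof(*ev));
		ev->sock_tid = sock_tid;
		ev->result = result;
		enqueue_for_worker(ev);
	}
}

int
main(void) {
	dispatch(0, 0, 0);  /* same thread, success: direct call */
	dispatch(1, 0, 0);  /* different thread: heap event + queue */
	dispatch(0, 0, -1); /* failure: heap event + queue */
	return 0;
}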