
Avoid netievent allocations when the callbacks can be called directly

After the user callbacks were made asynchronous, there was a visible
performance drop.  This commit avoids the unnecessary netievent
allocations while keeping the code paths the same for both the
asynchronous and the synchronous calls.

The same change was applied to isc__nm_udp_{read,send}, as those two
functions are on the hot path.
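
The pattern, as a minimal self-contained C sketch (hypothetical names and
types, not the BIND9 netmgr API): when the event can be handled right where
it is raised, it is built on the stack and the shared handler is called
directly; only when it has to be handed off to another thread is it
heap-allocated and queued. Both paths go through the same handler, so the
behaviour stays identical.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical event type standing in for the netmgr's isc__netievent_*_t. */
typedef struct event {
        int sock_id;
        int result;
} event_t;

/* One handler shared by the direct and the queued path, so behaviour
 * stays identical no matter how the event arrives. */
static void
async_callback(event_t *ev) {
        printf("callback: sock=%d result=%d\n", ev->sock_id, ev->result);
}

/* Stand-in for a worker queue: a real consumer would run the event on
 * its own thread; here it is executed and freed immediately. */
static void
enqueue_event(event_t *ev) {
        async_callback(ev);
        free(ev);
}

static void
dispatch(int sock_id, int result, bool can_run_directly) {
        if (can_run_directly) {
                /* Fast path: no heap allocation, the event lives on the
                 * stack and the handler is called synchronously. */
                event_t ev = { .sock_id = sock_id, .result = result };
                async_callback(&ev);
        } else {
                /* Slow path: allocate the event and hand it off to the
                 * worker queue of the owning thread. */
                event_t *ev = malloc(sizeof(*ev));
                *ev = (event_t){ .sock_id = sock_id, .result = result };
                enqueue_event(ev);
        }
}

int
main(void) {
        dispatch(1, 0, true);   /* same thread: stack event, direct call */
        dispatch(2, -1, false); /* cross-thread: heap event, enqueued */
        return 0;
}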
Ondřej Surý
2020-12-02 08:54:51 +01:00
parent 886eb5722d
commit d6d2fbe0e9
2 changed files with 34 additions and 42 deletions


@@ -1784,13 +1784,15 @@ isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
         REQUIRE(VALID_UVREQ(uvreq));
         REQUIRE(VALID_NMHANDLE(uvreq->handle));
 
-        isc__netievent_connectcb_t *ievent = isc__nm_get_netievent_connectcb(
-                sock->mgr, sock, uvreq, eresult);
-
         if (eresult == ISC_R_SUCCESS) {
-                isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-                                             (isc__netievent_t *)ievent);
+                isc__netievent_connectcb_t ievent = { .sock = sock,
+                                                      .req = uvreq,
+                                                      .result = eresult };
+                isc__nm_async_connectcb(NULL, (isc__netievent_t *)&ievent);
         } else {
+                isc__netievent_connectcb_t *ievent =
+                        isc__nm_get_netievent_connectcb(sock->mgr, sock, uvreq,
+                                                        eresult);
                 isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
                                        (isc__netievent_t *)ievent);
         }
@@ -1823,14 +1825,15 @@ isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
         REQUIRE(VALID_UVREQ(uvreq));
         REQUIRE(VALID_NMHANDLE(uvreq->handle));
 
-        isc__netievent_readcb_t *ievent =
-                isc__nm_get_netievent_readcb(sock->mgr, sock, uvreq, eresult);
-
         if (eresult == ISC_R_SUCCESS) {
-                REQUIRE(sock->tid == isc_nm_tid());
-                isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-                                             (isc__netievent_t *)ievent);
+                isc__netievent_readcb_t ievent = { .sock = sock,
+                                                   .req = uvreq,
+                                                   .result = eresult };
+
+                isc__nm_async_readcb(NULL, (isc__netievent_t *)&ievent);
         } else {
+                isc__netievent_readcb_t *ievent = isc__nm_get_netievent_readcb(
+                        sock->mgr, sock, uvreq, eresult);
                 isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
                                        (isc__netievent_t *)ievent);
         }
@@ -1864,14 +1867,14 @@ isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
         REQUIRE(VALID_UVREQ(uvreq));
         REQUIRE(VALID_NMHANDLE(uvreq->handle));
 
-        isc__netievent_sendcb_t *ievent =
-                isc__nm_get_netievent_sendcb(sock->mgr, sock, uvreq, eresult);
-
         if (eresult == ISC_R_SUCCESS) {
-                REQUIRE(sock->tid == isc_nm_tid());
-                isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-                                             (isc__netievent_t *)ievent);
+                isc__netievent_sendcb_t ievent = { .sock = sock,
+                                                   .req = uvreq,
+                                                   .result = eresult };
+                isc__nm_async_sendcb(NULL, (isc__netievent_t *)&ievent);
         } else {
+                isc__netievent_sendcb_t *ievent = isc__nm_get_netievent_sendcb(
+                        sock->mgr, sock, uvreq, eresult);
                 isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
                                        (isc__netievent_t *)ievent);
         }


@@ -454,7 +454,6 @@ isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
         isc_nmsocket_t *sock = handle->sock;
         isc_nmsocket_t *psock = NULL, *rsock = sock;
         isc_sockaddr_t *peer = &handle->peer;
-        isc__netievent_udpsend_t *ievent = NULL;
         isc__nm_uvreq_t *uvreq = NULL;
         uint32_t maxudp = atomic_load(&sock->mgr->maxudp);
         int ntid;
@@ -512,23 +511,14 @@ isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
         }
 
         if (isc_nm_tid() == rsock->tid) {
-                /*
-                 * If we're in the same thread as the socket we can send
-                 * the data directly, but we still need to return errors
-                 * via the callback for API consistency.
-                 */
-                isc_result_t result = udp_send_direct(rsock, uvreq, peer);
-                if (result != ISC_R_SUCCESS) {
-                        isc__nm_incstats(rsock->mgr,
-                                         rsock->statsindex[STATID_SENDFAIL]);
-                        failed_send_cb(rsock, uvreq, result);
-                }
+                isc__netievent_udpsend_t ievent = { .sock = rsock,
+                                                    .req = uvreq,
+                                                    .peer = *peer };
+
+                isc__nm_async_udpsend(NULL, (isc__netievent_t *)&ievent);
         } else {
-                /*
-                 * We need to create an event and pass it using async
-                 * channel
-                 */
-                ievent = isc__nm_get_netievent_udpsend(sock->mgr, rsock);
+                isc__netievent_udpsend_t *ievent =
+                        isc__nm_get_netievent_udpsend(sock->mgr, rsock);
 
                 ievent->peer = *peer;
                 ievent->req = uvreq;
@@ -551,7 +541,7 @@ isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0) {
         REQUIRE(sock->tid == isc_nm_tid());
         UNUSED(worker);
 
-        if (!isc__nmsocket_active(ievent->sock)) {
+        if (inactive(sock)) {
                 failed_send_cb(sock, uvreq, ISC_R_CANCELED);
                 return;
         }
@@ -1000,7 +990,6 @@ isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
         REQUIRE(VALID_NMSOCK(handle->sock));
 
         isc_nmsocket_t *sock = handle->sock;
-        isc__netievent_udpread_t *ievent = NULL;
 
         REQUIRE(sock->type == isc_nm_udpsocket);
         REQUIRE(sock->statichandle == handle);
@@ -1011,14 +1000,14 @@ isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
         sock->recv_cbarg = cbarg;
         sock->recv_read = true;
 
-        ievent = isc__nm_get_netievent_udpread(sock->mgr, sock);
-
-        if (sock->reading) {
+        if (!sock->reading && sock->tid == isc_nm_tid()) {
+                isc__netievent_udpread_t ievent = { .sock = sock };
+                isc__nm_async_udpread(NULL, (isc__netievent_t *)&ievent);
+        } else {
+                isc__netievent_udpread_t *ievent =
+                        isc__nm_get_netievent_udpread(sock->mgr, sock);
                 isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
                                        (isc__netievent_t *)ievent);
-        } else {
-                isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
-                                             (isc__netievent_t *)ievent);
         }
 }
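
The direct path in isc__nm_udp_read()/isc__nm_udp_send() above is taken only
when the caller already runs on the socket's worker thread
(sock->tid == isc_nm_tid()); otherwise the event still has to be queued.
A hypothetical, self-contained sketch of that kind of thread-affinity check,
assuming a thread-local worker id in the spirit of isc_nm_tid() (names are
illustrative, not the BIND9 API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical thread-local worker id; -1 means "not a worker thread".
 * This mimics the role of an isc_nm_tid()-style helper, not its API. */
static _Thread_local int worker_tid = -1;

typedef struct socket_ctx {
        int tid; /* index of the worker thread that owns this socket */
} socket_ctx_t;

/* The direct (allocation-free) path is only safe on the owning thread. */
static bool
can_dispatch_directly(const socket_ctx_t *sock) {
        return worker_tid == sock->tid;
}

static void *
worker_main(void *arg) {
        socket_ctx_t *sock = arg;

        worker_tid = 0; /* this thread acts as worker 0 */
        printf("worker thread: direct path = %s\n",
               can_dispatch_directly(sock) ? "yes" : "no (enqueue)");
        return NULL;
}

int
main(void) {
        socket_ctx_t sock = { .tid = 0 };
        pthread_t thr;

        /* The main thread is not a worker, so it has to enqueue an event. */
        printf("main thread:   direct path = %s\n",
               can_dispatch_directly(&sock) ? "yes" : "no (enqueue)");

        pthread_create(&thr, NULL, worker_main, &sock);
        pthread_join(thr, NULL);
        return 0;
}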