mirror of
https://gitlab.isc.org/isc-projects/bind9
synced 2025-08-31 14:35:26 +00:00
Replace locked mempools with memory contexts
Current mempools are hybrid structures — they serve two purposes: 1. a mempool with a lock is essentially a fixed-size allocator with pre-allocated free items; 2. a mempool without a lock is a doubly-linked list of preallocated items. The first kind of usage can easily be replaced with jemalloc small-size arena objects and thread-local caches. The second usage cannot be, and we need to keep it (in libdns: message.c) for performance reasons.
This commit is contained in:
@@ -281,21 +281,6 @@ isc__netmgr_create(isc_mem_t *mctx, uint32_t workers, isc_nm_t **netmgrp) {
|
||||
atomic_init(&mgr->keepalive, 30000);
|
||||
atomic_init(&mgr->advertised, 30000);
|
||||
|
||||
isc_mutex_init(&mgr->reqlock);
|
||||
isc_mempool_create(mgr->mctx, sizeof(isc__nm_uvreq_t), &mgr->reqpool);
|
||||
isc_mempool_setname(mgr->reqpool, "nm_reqpool");
|
||||
isc_mempool_setfreemax(mgr->reqpool, 4096);
|
||||
isc_mempool_associatelock(mgr->reqpool, &mgr->reqlock);
|
||||
isc_mempool_setfillcount(mgr->reqpool, 32);
|
||||
|
||||
isc_mutex_init(&mgr->evlock);
|
||||
isc_mempool_create(mgr->mctx, sizeof(isc__netievent_storage_t),
|
||||
&mgr->evpool);
|
||||
isc_mempool_setname(mgr->evpool, "nm_evpool");
|
||||
isc_mempool_setfreemax(mgr->evpool, 4096);
|
||||
isc_mempool_associatelock(mgr->evpool, &mgr->evlock);
|
||||
isc_mempool_setfillcount(mgr->evpool, 32);
|
||||
|
||||
isc_barrier_init(&mgr->pausing, workers);
|
||||
isc_barrier_init(&mgr->resuming, workers);
|
||||
|
||||
@@ -377,14 +362,14 @@ nm_destroy(isc_nm_t **mgr0) {
|
||||
|
||||
/* Empty the async event queues */
|
||||
while ((ievent = DEQUEUE_PRIORITY_NETIEVENT(worker)) != NULL) {
|
||||
isc_mempool_put(mgr->evpool, ievent);
|
||||
isc_mem_put(mgr->mctx, ievent, sizeof(*ievent));
|
||||
}
|
||||
|
||||
INSIST(DEQUEUE_PRIVILEGED_NETIEVENT(worker) == NULL);
|
||||
INSIST(DEQUEUE_TASK_NETIEVENT(worker) == NULL);
|
||||
|
||||
while ((ievent = DEQUEUE_PRIORITY_NETIEVENT(worker)) != NULL) {
|
||||
isc_mempool_put(mgr->evpool, ievent);
|
||||
isc_mem_put(mgr->mctx, ievent, sizeof(*ievent));
|
||||
}
|
||||
isc_condition_destroy(&worker->cond_prio);
|
||||
|
||||
@@ -413,12 +398,6 @@ nm_destroy(isc_nm_t **mgr0) {
|
||||
isc_condition_destroy(&mgr->wkpausecond);
|
||||
isc_mutex_destroy(&mgr->lock);
|
||||
|
||||
isc_mempool_destroy(&mgr->evpool);
|
||||
isc_mutex_destroy(&mgr->evlock);
|
||||
|
||||
isc_mempool_destroy(&mgr->reqpool);
|
||||
isc_mutex_destroy(&mgr->reqlock);
|
||||
|
||||
isc_mem_put(mgr->mctx, mgr->workers,
|
||||
mgr->nworkers * sizeof(isc__networker_t));
|
||||
isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(*mgr));
|
||||
@@ -1038,7 +1017,8 @@ process_queue(isc__networker_t *worker, netievent_type_t type) {
|
||||
|
||||
void *
|
||||
isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type) {
|
||||
isc__netievent_storage_t *event = isc_mempool_get(mgr->evpool);
|
||||
isc__netievent_storage_t *event = isc_mem_get(mgr->mctx,
|
||||
sizeof(*event));
|
||||
|
||||
*event = (isc__netievent_storage_t){ .ni.type = type };
|
||||
return (event);
|
||||
@@ -1046,7 +1026,7 @@ isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type) {
|
||||
|
||||
void
|
||||
isc__nm_put_netievent(isc_nm_t *mgr, void *ievent) {
|
||||
isc_mempool_put(mgr->evpool, ievent);
|
||||
isc_mem_put(mgr->mctx, ievent, sizeof(isc__netievent_storage_t));
|
||||
}
|
||||
|
||||
NETIEVENT_SOCKET_DEF(tcpclose);
|
||||
@@ -1273,7 +1253,7 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) {
|
||||
isc_astack_destroy(sock->inactivehandles);
|
||||
|
||||
while ((uvreq = isc_astack_pop(sock->inactivereqs)) != NULL) {
|
||||
isc_mempool_put(sock->mgr->reqpool, uvreq);
|
||||
isc_mem_put(sock->mgr->mctx, uvreq, sizeof(*uvreq));
|
||||
}
|
||||
|
||||
isc_astack_destroy(sock->inactivereqs);
|
||||
@@ -2428,7 +2408,7 @@ isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG) {
|
||||
}
|
||||
|
||||
if (req == NULL) {
|
||||
req = isc_mempool_get(mgr->reqpool);
|
||||
req = isc_mem_get(mgr->mctx, sizeof(*req));
|
||||
}
|
||||
|
||||
*req = (isc__nm_uvreq_t){ .magic = 0 };
|
||||
@@ -2464,7 +2444,7 @@ isc___nm_uvreq_put(isc__nm_uvreq_t **req0, isc_nmsocket_t *sock FLARG) {
|
||||
|
||||
if (!isc__nmsocket_active(sock) ||
|
||||
!isc_astack_trypush(sock->inactivereqs, req)) {
|
||||
isc_mempool_put(sock->mgr->reqpool, req);
|
||||
isc_mem_put(sock->mgr->mctx, req, sizeof(*req));
|
||||
}
|
||||
|
||||
if (handle != NULL) {
|
||||
|
Reference in New Issue
Block a user