2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-31 06:25:31 +00:00

Replace netievent lock-free queue with simple locked queue

The current implementation of isc_queue uses a Michael-Scott lock-free
queue that in turn uses hazard pointers.  It was discovered that, given
the way we use the isc_queue, such a complicated mechanism isn't really
needed, because most of the time we either execute the work directly
when on the nmthread (in the case of UDP) or schedule the work from the
matching nmthread.

Replace the current implementation of the isc_queue with a simple locked
ISC_LIST.  There's a slight improvement: since copying the whole list is
very lightweight, we move the queue into a new list before we start
processing, taking the lock only while moving the queue rather than for
every single item on the list.

NOTE: There's room for future improvement — since we don't guarantee
the order in which the netievents are processed, we could have two lists:
one unlocked, used when scheduling the work from the matching thread,
and one locked, used when scheduling from a non-matching thread.
This commit is contained in:
Ondřej Surý
2022-02-22 23:40:39 +01:00
parent 1bb56bb0fc
commit 6bd025942c
15 changed files with 132 additions and 815 deletions

View File

@@ -27,7 +27,6 @@
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/queue.h>
#include <isc/quota.h>
#include <isc/random.h>
#include <isc/refcount.h>
@@ -201,6 +200,17 @@ typedef enum {
NETIEVENT_MAX = 4,
} netievent_type_t;
typedef struct isc__nm_uvreq isc__nm_uvreq_t;
typedef struct isc__netievent isc__netievent_t;
typedef ISC_LIST(isc__netievent_t) isc__netievent_list_t;
typedef struct ievent {
isc_mutex_t lock;
isc_condition_t cond;
isc__netievent_list_t list;
} ievent_t;
/*
* Single network event loop worker.
*/
@@ -210,13 +220,10 @@ typedef struct isc__networker {
uv_loop_t loop; /* libuv loop structure */
uv_async_t async; /* async channel to send
* data to this networker */
isc_mutex_t lock;
bool paused;
bool finished;
isc_thread_t thread;
isc_queue_t *ievents[NETIEVENT_MAX];
atomic_uint_fast32_t nievents[NETIEVENT_MAX];
isc_condition_t cond_prio;
ievent_t ievents[NETIEVENT_MAX];
isc_refcount_t references;
atomic_int_fast64_t pktcount;
@@ -421,12 +428,13 @@ isc__nm_put_netievent(isc_nm_t *mgr, void *ievent);
* either in netmgr.c or matching protocol file (e.g. udp.c, tcp.c, etc.)
*/
#define NETIEVENT__SOCKET \
isc__netievent_type type; \
isc_nmsocket_t *sock; \
const char *file; \
unsigned int line; \
const char *func
#define NETIEVENT__SOCKET \
isc__netievent_type type; \
ISC_LINK(isc__netievent_t) link; \
isc_nmsocket_t *sock; \
const char *file; \
unsigned int line; \
const char *func;
typedef struct isc__netievent__socket {
NETIEVENT__SOCKET;
@@ -489,8 +497,7 @@ typedef struct isc__netievent__socket_req {
}
typedef struct isc__netievent__socket_req_result {
isc__netievent_type type;
isc_nmsocket_t *sock;
NETIEVENT__SOCKET;
isc__nm_uvreq_t *req;
isc_result_t result;
} isc__netievent__socket_req_result_t;
@@ -589,6 +596,7 @@ typedef struct isc__netievent__socket_quota {
typedef struct isc__netievent__task {
isc__netievent_type type;
ISC_LINK(isc__netievent_t) link;
isc_task_t *task;
} isc__netievent__task_t;
@@ -624,8 +632,7 @@ typedef struct isc__netievent_udpsend {
} isc__netievent_udpsend_t;
typedef struct isc__netievent_tlsconnect {
isc__netievent_type type;
isc_nmsocket_t *sock;
NETIEVENT__SOCKET;
SSL_CTX *ctx;
isc_sockaddr_t local; /* local address */
isc_sockaddr_t peer; /* peer address */
@@ -633,6 +640,7 @@ typedef struct isc__netievent_tlsconnect {
typedef struct isc__netievent {
isc__netievent_type type;
ISC_LINK(isc__netievent_t) link;
} isc__netievent_t;
#define NETIEVENT_TYPE(type) typedef isc__netievent_t isc__netievent_##type##_t;