/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

#pragma once

#include <unistd.h>
#include <uv.h>

#include <isc/astack.h>
#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/condition.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/queue.h>
#include <isc/quota.h>
#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/region.h>
#include <isc/result.h>
#include <isc/sockaddr.h>
#include <isc/stats.h>
#include <isc/thread.h>
#include <isc/util.h>

#include "uv-compat.h"

#define ISC_NETMGR_TID_UNKNOWN -1

#if !defined(WIN32)
/*
 * New versions of libuv support recvmmsg on unices.
 * Since recvbuf is only allocated once per worker, allocating a bigger
 * one is not that wasteful.
 * 20 here is UV__MMSG_MAXWIDTH taken from the current libuv source; nothing
 * will break if the original value changes.
 */
#define ISC_NETMGR_RECVBUF_SIZE (20 * 65536)
#else
#define ISC_NETMGR_RECVBUF_SIZE (65536)
#endif

/*
 * Single network event loop worker.
 */
typedef struct isc__networker {
	isc_nm_t *mgr;
	int id;		  /* thread id */
	uv_loop_t loop;	  /* libuv loop structure */
	uv_async_t async; /* async channel to send
			   * data to this networker */
	isc_mutex_t lock;
	isc_condition_t cond;
	bool paused;
	bool finished;
	isc_thread_t thread;
	isc_queue_t *ievents;	   /* incoming async events */
	isc_queue_t *ievents_prio; /* priority async events
				    * used for listening etc.
				    * can be processed while
				    * worker is paused */
	isc_refcount_t references;
	atomic_int_fast64_t pktcount;
	char *recvbuf;
	bool recvbuf_inuse;
} isc__networker_t;

/*
 * A general handle for a connection bound to a networker.  For UDP
 * connections we have the peer address here, so both TCP and UDP can
 * be handled with a simple send-like function.
 */
#define NMHANDLE_MAGIC ISC_MAGIC('N', 'M', 'H', 'D')
#define VALID_NMHANDLE(t)                      \
	(ISC_MAGIC_VALID(t, NMHANDLE_MAGIC) && \
	 atomic_load(&(t)->references) > 0)

typedef void (*isc__nm_closecb)(isc_nmhandle_t *);

struct isc_nmhandle {
	int magic;
	isc_refcount_t references;

	/*
	 * The socket is not 'attached' in the traditional
	 * reference-counting sense.  Instead, we keep all handles in an
	 * array in the socket object.  This way, we don't have circular
	 * dependencies and we can close all handles when we're destroying
	 * the socket.
	 */
	isc_nmsocket_t *sock;
	size_t ah_pos; /* Position in the socket's 'active handles' array */

	isc_sockaddr_t peer;
	isc_sockaddr_t local;
	isc_nm_opaquecb_t doreset; /* reset extra callback, external */
	isc_nm_opaquecb_t dofree;  /* free extra callback, external */
	void *opaque;
	char extra[];
};

/*
 * An interface - an address we can listen on.
 */
struct isc_nmiface {
	isc_sockaddr_t addr;
};

typedef enum isc__netievent_type {
	netievent_udpsend,
	netievent_udpstop,

	netievent_tcpconnect,
	netievent_tcpsend,
	netievent_tcpstartread,
	netievent_tcppauseread,
	netievent_tcpchildaccept,
	netievent_tcpaccept,
	netievent_tcpstop,
	netievent_tcpclose,

	netievent_tcpdnssend,
	netievent_tcpdnsclose,

	netievent_closecb,
	netievent_shutdown,
	netievent_stop,

	netievent_prio = 0xff, /* event type values higher than this
				* will be treated as high-priority
				* events, which can be processed
				* while the netmgr is paused.
				*/
	netievent_udplisten,
	netievent_tcplisten,
} isc__netievent_type;
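
/*
 * Illustrative sketch (not part of the API): how a dispatcher could use
 * 'netievent_prio' to pick the worker queue an incoming event belongs on.
 * The queue fields match isc__networker_t above; the helper name is
 * hypothetical.
 *
 *	static isc_queue_t *
 *	example_pick_queue(isc__networker_t *worker, isc__netievent_t *ev) {
 *		if (ev->type > netievent_prio) {
 *			return (worker->ievents_prio);
 *		}
 *		return (worker->ievents);
 *	}
 */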

typedef union {
	isc_nm_recv_cb_t recv;
	isc_nm_cb_t send;
	isc_nm_cb_t connect;
} isc__nm_cb_t;

/*
 * Wrapper around uv_req_t with 'our' fields in it.  req->data should
 * always point to its parent.  Note that we always allocate more than
 * sizeof(struct) because we make room for different req types.
 */
#define UVREQ_MAGIC    ISC_MAGIC('N', 'M', 'U', 'R')
#define VALID_UVREQ(t) ISC_MAGIC_VALID(t, UVREQ_MAGIC)

typedef struct isc__nm_uvreq {
	int magic;
	isc_nmsocket_t *sock;
	isc_nmhandle_t *handle;
	uv_buf_t uvbuf;	      /* translated isc_region_t, to be
			       * sent or received */
	isc_sockaddr_t local; /* local address */
	isc_sockaddr_t peer;  /* peer address */
	isc__nm_cb_t cb;      /* callback */
	void *cbarg;	      /* callback argument */
	uv_pipe_t ipc;	      /* used for sending socket
			       * uv_handles to other threads */
	union {
		uv_req_t req;
		uv_getaddrinfo_t getaddrinfo;
		uv_getnameinfo_t getnameinfo;
		uv_shutdown_t shutdown;
		uv_write_t write;
		uv_connect_t connect;
		uv_udp_send_t udp_send;
		uv_fs_t fs;
		uv_work_t work;
	} uv_req;
} isc__nm_uvreq_t;

typedef struct isc__netievent__socket {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
} isc__netievent__socket_t;

typedef isc__netievent__socket_t isc__netievent_udplisten_t;
typedef isc__netievent__socket_t isc__netievent_udpstop_t;
typedef isc__netievent__socket_t isc__netievent_tcpstop_t;
typedef isc__netievent__socket_t isc__netievent_tcpclose_t;
typedef isc__netievent__socket_t isc__netievent_startread_t;
typedef isc__netievent__socket_t isc__netievent_pauseread_t;
typedef isc__netievent__socket_t isc__netievent_closecb_t;
typedef isc__netievent__socket_t isc__netievent_tcpdnsclose_t;

typedef struct isc__netievent__socket_req {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc__nm_uvreq_t *req;
} isc__netievent__socket_req_t;

typedef isc__netievent__socket_req_t isc__netievent_tcpconnect_t;
typedef isc__netievent__socket_req_t isc__netievent_tcplisten_t;
typedef isc__netievent__socket_req_t isc__netievent_tcpsend_t;
typedef isc__netievent__socket_req_t isc__netievent_tcpdnssend_t;

typedef struct isc__netievent__socket_streaminfo_quota {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_uv_stream_info_t streaminfo;
	isc_quota_t *quota;
} isc__netievent__socket_streaminfo_quota_t;

typedef isc__netievent__socket_streaminfo_quota_t
	isc__netievent_tcpchildaccept_t;

typedef struct isc__netievent__socket_handle {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_nmhandle_t *handle;
} isc__netievent__socket_handle_t;

typedef struct isc__netievent__socket_quota {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_quota_t *quota;
} isc__netievent__socket_quota_t;

typedef isc__netievent__socket_quota_t isc__netievent_tcpaccept_t;

typedef struct isc__netievent_udpsend {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_sockaddr_t peer;
	isc__nm_uvreq_t *req;
} isc__netievent_udpsend_t;

typedef struct isc__netievent {
	isc__netievent_type type;
} isc__netievent_t;

typedef isc__netievent_t isc__netievent_shutdown_t;
typedef isc__netievent_t isc__netievent_stop_t;

typedef union {
	isc__netievent_t ni;
	isc__netievent__socket_t nis;
	isc__netievent__socket_req_t nisr;
	isc__netievent_udpsend_t nius;
	isc__netievent__socket_quota_t nisq;
	isc__netievent__socket_streaminfo_quota_t nissq;
} isc__netievent_storage_t;

/*
 * Network manager
 */
#define NM_MAGIC    ISC_MAGIC('N', 'E', 'T', 'M')
#define VALID_NM(t) ISC_MAGIC_VALID(t, NM_MAGIC)

struct isc_nm {
	int magic;
	isc_refcount_t references;
	isc_mem_t *mctx;
	uint32_t nworkers;
	isc_mutex_t lock;
	isc_condition_t wkstatecond;
	isc__networker_t *workers;

	isc_stats_t *stats;

	isc_mempool_t *reqpool;
	isc_mutex_t reqlock;

	isc_mempool_t *evpool;
	isc_mutex_t evlock;

	atomic_uint_fast32_t workers_running;
	atomic_uint_fast32_t workers_paused;
	atomic_uint_fast32_t maxudp;
	atomic_bool paused;

	/*
	 * Active connections are being closed and new connections are
	 * no longer allowed.
	 */
	atomic_bool closing;

	/*
	 * A worker is actively waiting for other workers, for example to
	 * stop listening; that means no other thread can do the same thing
	 * or pause, or we'll deadlock.  We have to either re-enqueue our
	 * event or wait for the other one to finish if we want to pause.
	 */
	atomic_bool interlocked;

	/*
	 * Timeout values for TCP connections, corresponding to
	 * tcp-initial-timeout, tcp-idle-timeout, tcp-keepalive-timeout,
	 * and tcp-advertised-timeout.  Note that these are stored in
	 * milliseconds so they can be used directly with the libuv timer,
	 * but they are configured in tenths of seconds.
	 */
	uint32_t init;
	uint32_t idle;
	uint32_t keepalive;
	uint32_t advertised;
};
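
/*
 * Illustrative sketch (not part of the API): the timeout fields above are
 * configured in tenths of seconds but stored in milliseconds, so a setter
 * multiplies by 100.  The function name is hypothetical.
 *
 *	static void
 *	example_set_init_timeout(isc_nm_t *mgr, uint32_t tenths) {
 *		mgr->init = tenths * 100;
 *	}
 */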

typedef enum isc_nmsocket_type {
	isc_nm_udpsocket,
	isc_nm_udplistener, /* Aggregate of nm_udpsocks */
	isc_nm_tcpsocket,
	isc_nm_tcplistener,
	isc_nm_tcpdnslistener,
	isc_nm_tcpdnssocket,
} isc_nmsocket_type;

/*%
 * A universal structure for either a single socket or a group of
 * dup'd/SO_REUSE_PORT-using sockets listening on the same interface.
 */
#define NMSOCK_MAGIC	ISC_MAGIC('N', 'M', 'S', 'K')
#define VALID_NMSOCK(t) ISC_MAGIC_VALID(t, NMSOCK_MAGIC)

/*%
 * Index into socket stat counter arrays.
 */
enum { STATID_OPEN = 0,
       STATID_OPENFAIL = 1,
       STATID_CLOSE = 2,
       STATID_BINDFAIL = 3,
       STATID_CONNECTFAIL = 4,
       STATID_CONNECT = 5,
       STATID_ACCEPTFAIL = 6,
       STATID_ACCEPT = 7,
       STATID_SENDFAIL = 8,
       STATID_RECVFAIL = 9,
       STATID_ACTIVE = 10 };

struct isc_nmsocket {
	/*% Unlocked, RO */
	int magic;
	int tid;
	isc_nmsocket_type type;
	isc_nm_t *mgr;
	/*% Parent socket for multithreaded listeners */
	isc_nmsocket_t *parent;
	/*% Listener socket this connection was accepted on */
	isc_nmsocket_t *listener;
	/*% Self, for self-contained unreferenced sockets (tcpdns) */
	isc_nmsocket_t *self;

	/*%
	 * quota is the TCP client quota, attached when a TCP connection
	 * is established.  pquota is a non-attached pointer to the
	 * TCP client quota, stored in listening sockets but only
	 * attached in connected sockets.
	 */
	isc_quota_t *quota;
	isc_quota_t *pquota;
	isc_quota_cb_t quotacb;

	/*%
	 * Socket statistics
	 */
	const isc_statscounter_t *statsindex;

	/*%
	 * TCP read timeout timer.
	 */
	uv_timer_t timer;
	bool timer_initialized;
	uint64_t read_timeout;

	/*% outer socket is for 'wrapped' sockets - e.g. tcpdns in tcp */
	isc_nmsocket_t *outer;

	/*% server socket for connections */
	isc_nmsocket_t *server;

	/*% Child sockets for multi-socket setups */
	isc_nmsocket_t *children;
	int nchildren;
	isc_nmiface_t *iface;
	isc_nmhandle_t *statichandle;
	isc_nmhandle_t *outerhandle;

	/*% Extra data allocated at the end of each isc_nmhandle_t */
	size_t extrahandlesize;

	/*% TCP backlog */
	int backlog;

	/*% libuv data */
	uv_os_sock_t fd;
	union uv_any_handle uv_handle;

	/*% Peer address */
	isc_sockaddr_t peer;

	/* Atomic */
	/*% Number of running (e.g. listening) child sockets */
	atomic_int_fast32_t rchildren;

	/*%
	 * Socket is active if it's listening, working, etc.  If it's
	 * closing, then it doesn't make sense, for example, to
	 * push handles or reqs for reuse.
	 */
	atomic_bool active;
	atomic_bool destroying;

	/*%
	 * Socket is closed if it's not active and all the possible
	 * callbacks were fired, there are no active handles, etc.
	 * If active==false but closed==false, that means the socket
	 * is closing.
	 */
	atomic_bool closed;
	atomic_bool listening;
	atomic_bool listen_error;
	atomic_bool connected;
	atomic_bool connect_error;
	isc_refcount_t references;

	/*%
	 * Established an outgoing connection, as client not server.
	 */
	atomic_bool client;

	/*%
	 * TCPDNS socket has been set not to pipeline.
	 */
	atomic_bool sequential;

	/*%
	 * TCPDNS socket has exceeded the maximum number of
	 * simultaneous requests per connection, so it will be temporarily
	 * restricted from pipelining.
	 */
	atomic_bool overlimit;

	/*%
	 * TCPDNS socket in sequential mode is currently processing a packet;
	 * we need to wait until it finishes.
	 */
	atomic_bool processing;

	/*%
	 * A TCP socket has had isc_nm_pauseread() called.
	 */
	atomic_bool readpaused;

	/*%
	 * A TCP or TCPDNS socket has been set to use the keepalive
	 * timeout instead of the default idle timeout.
	 */
	atomic_bool keepalive;

	/*%
	 * 'spare' handles that can be reused to avoid allocations,
	 * for UDP.
	 */
	isc_astack_t *inactivehandles;
	isc_astack_t *inactivereqs;

	/*%
	 * Used to wait for TCP listening events to complete, and
	 * for the number of running children to reach zero during
	 * shutdown.
	 */
	isc_mutex_t lock;
	isc_condition_t cond;

	/*%
	 * Used to pass a result back from TCP listening events.
	 */
	isc_result_t result;

	/*%
	 * List of active handles.
	 * ah - current position in 'ah_frees'; this represents the
	 *      current number of active handles;
	 * ah_size - size of the 'ah_frees' and 'ah_handles' arrays
	 * ah_handles - array of pointers to active handles
	 *
	 * Adding a handle:
	 *  - if ah == ah_size, reallocate
	 *  - x = ah_frees[ah]
	 *  - ah_frees[ah++] = 0;
	 *  - ah_handles[x] = handle
	 *  - x must be stored with the handle!
	 * Removing a handle:
	 *  - ah_frees[--ah] = x
	 *  - ah_handles[x] = NULL;
	 *
	 * XXX: for now this is locked with socket->lock, but we
	 * might want to change it to something lockless in the
	 * future.
	 */
	atomic_int_fast32_t ah;
	size_t ah_size;
	size_t *ah_frees;
	isc_nmhandle_t **ah_handles;

	/*% Buffer for TCPDNS processing */
	size_t buf_size;
	size_t buf_len;
	unsigned char *buf;

	/*%
	 * This function will be called with handle->sock
	 * as the argument whenever a handle's references drop
	 * to zero, after its reset callback has been called.
	 */
	isc_nm_opaquecb_t closehandle_cb;

	isc_nm_recv_cb_t recv_cb;
	void *recv_cbarg;

	isc_nm_accept_cb_t accept_cb;
	void *accept_cbarg;
};
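
/*
 * Illustrative sketch (not part of the API): the 'active handles'
 * bookkeeping described above, performed under sock->lock.  The helper
 * names are hypothetical; reallocation of 'ah_frees'/'ah_handles' when
 * ah == ah_size is omitted.
 *
 *	static void
 *	example_add_handle(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
 *		size_t x = sock->ah_frees[sock->ah];
 *		sock->ah_frees[sock->ah++] = 0;
 *		sock->ah_handles[x] = handle;
 *		handle->ah_pos = x;
 *	}
 *
 *	static void
 *	example_remove_handle(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
 *		size_t x = handle->ah_pos;
 *		sock->ah_frees[--sock->ah] = x;
 *		sock->ah_handles[x] = NULL;
 *	}
 */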

bool
isc__nm_in_netthread(void);
/*%
 * Returns 'true' if we're in the network thread.
 */

void *
isc__nm_get_ievent(isc_nm_t *mgr, isc__netievent_type type);
/*%<
 * Allocate an ievent and set the type.
 */
void
isc__nm_put_ievent(isc_nm_t *mgr, void *ievent);

void
isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event);
/*%<
 * Enqueue an ievent onto a specific worker queue.  (This is the only safe
 * way to use an isc__networker_t from another thread.)
 */
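
/*
 * Illustrative sketch (not part of the API): allocating an event of a
 * given type, filling it in, and handing it to a worker.  Which worker to
 * target is up to the caller; indexing mgr->workers by the socket's
 * thread id is just one plausible choice.
 *
 *	isc__netievent_udpstop_t *ievent =
 *		isc__nm_get_ievent(sock->mgr, netievent_udpstop);
 *	ievent->sock = sock;
 *	isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
 *			       (isc__netievent_t *)ievent);
 */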

void
isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf);
/*%<
 * Free a buffer allocated for a receive operation.
 *
 * Note that as currently implemented, this doesn't actually free
 * anything; it just marks the isc__networker's UDP receive buffer
 * as "not in use".
 */

isc_nmhandle_t *
isc__nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer,
		  isc_sockaddr_t *local);
/*%<
 * Get a handle for the socket 'sock', allocating a new one
 * if there isn't one available in 'sock->inactivehandles'.
 *
 * If 'peer' is not NULL, set the handle's peer address to 'peer',
 * otherwise set it to 'sock->peer'.
 *
 * If 'local' is not NULL, set the handle's local address to 'local',
 * otherwise set it to 'sock->iface->addr'.
 *
 * 'sock' will be attached to 'handle->sock'.  The caller may need
 * to detach the socket afterward.
 */
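
/*
 * Illustrative sketch (not part of the API): a UDP receive path might
 * wrap the sender's address in a handle before invoking the socket's
 * read callback; 'peeraddr' here is a hypothetical caller-side value.
 *
 *	isc_nmhandle_t *handle = isc__nmhandle_get(sock, &peeraddr, NULL);
 */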

isc__nm_uvreq_t *
isc__nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock);
/*%<
 * Get a UV request structure for the socket 'sock', allocating a
 * new one if there isn't one available in 'sock->inactivereqs'.
 */

void
isc__nm_uvreq_put(isc__nm_uvreq_t **req, isc_nmsocket_t *sock);
/*%<
 * Completes the use of a UV request structure, setting '*req' to NULL.
 *
 * The UV request is pushed onto the 'sock->inactivereqs' stack or,
 * if that doesn't work, freed.
 */
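
/*
 * Illustrative sketch (not part of the API): typical lifecycle of a UV
 * request around a send; 'region', 'cb' and 'cbarg' are hypothetical
 * caller-supplied values.
 *
 *	isc__nm_uvreq_t *req = isc__nm_uvreq_get(sock->mgr, sock);
 *	req->uvbuf.base = (char *)region->base;
 *	req->uvbuf.len = region->length;
 *	req->cb.send = cb;
 *	req->cbarg = cbarg;
 *	...
 *	isc__nm_uvreq_put(&req, sock);
 */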

void
isc__nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type,
		   isc_nmiface_t *iface);
/*%<
 * Initialize socket 'sock', attach it to 'mgr', and set it to type 'type'
 * and its interface to 'iface'.
 */

void
isc__nmsocket_attach(isc_nmsocket_t *sock, isc_nmsocket_t **target);
/*%<
 * Attach to a socket, increasing its reference count.
 */

void
isc__nmsocket_detach(isc_nmsocket_t **socketp);
/*%<
 * Detach from a socket, decreasing its reference count and possibly
 * destroying the socket if it's no longer referenced.
 */
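
/*
 * Illustrative sketch (not part of the API): keeping a socket alive
 * across an asynchronous operation with attach/detach.
 *
 *	isc_nmsocket_t *tmp = NULL;
 *	isc__nmsocket_attach(sock, &tmp);
 *	...
 *	isc__nmsocket_detach(&tmp);
 */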

void
isc__nmsocket_prep_destroy(isc_nmsocket_t *sock);
/*%<
 * Mark 'sock' as inactive, close it if necessary, and destroy it
 * if there are no remaining references or active handles.
 */

bool
isc__nmsocket_active(isc_nmsocket_t *sock);
/*%<
 * Determine whether 'sock' is active by checking 'sock->active'
 * or, for child sockets, 'sock->parent->active'.
 */

void
isc__nmsocket_clearcb(isc_nmsocket_t *sock);
/*%<
 * Clear the recv and accept callbacks in 'sock'.
 */

void
isc__nm_async_closecb(isc__networker_t *worker, isc__netievent_t *ev0);
/*%<
 * Issue a 'handle closed' callback on the socket.
 */

void
isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0);
/*%<
 * Walk through all uv handles, get the underlying sockets, and issue
 * close on them.
 */

isc_result_t
isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
		 void *cbarg);
/*%<
 * Back-end implementation of isc_nm_send() for UDP handles.
 */

void
isc__nm_udp_stoplistening(isc_nmsocket_t *sock);

void
isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0);

void
isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0);
/*%<
 * Callback handlers for asynchronous UDP events (listen, stoplisten, send).
 */

isc_result_t
isc__nm_tcp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
		 void *cbarg);
/*%<
 * Back-end implementation of isc_nm_send() for TCP handles.
 */

isc_result_t
isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
/*%<
 * Back-end implementation of isc_nm_read() for TCP handles.
 */

void
isc__nm_tcp_close(isc_nmsocket_t *sock);
/*%<
 * Close a TCP socket.
 */

isc_result_t
isc__nm_tcp_pauseread(isc_nmsocket_t *sock);
/*%<
 * Pause reading on this socket, while still remembering the callback.
 */

isc_result_t
isc__nm_tcp_resumeread(isc_nmsocket_t *sock);
/*%<
 * Resume reading from the socket.
 */

void
isc__nm_tcp_shutdown(isc_nmsocket_t *sock);
/*%<
 * Called during the shutdown process to close and clean up connected
 * sockets.
 */

void
isc__nm_tcp_cancelread(isc_nmhandle_t *handle);
/*%<
 * Stop reading on a connected TCP handle.
 */

void
isc__nm_tcp_stoplistening(isc_nmsocket_t *sock);

void
isc__nm_async_tcpconnect(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcpaccept(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcpchildaccept(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcpstop(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcpsend(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_startread(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_pauseread(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcp_startread(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcp_pauseread(isc__networker_t *worker, isc__netievent_t *ev0);
void
isc__nm_async_tcpclose(isc__networker_t *worker, isc__netievent_t *ev0);
/*%<
 * Callback handlers for asynchronous TCP events (connect, listen,
 * stoplisten, send, read, pause, close).
 */

isc_result_t
isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region,
		    isc_nm_cb_t cb, void *cbarg);
/*%<
 * Back-end implementation of isc_nm_send() for TCPDNS handles.
 */

void
isc__nm_tcpdns_close(isc_nmsocket_t *sock);
/*%<
 * Close a TCPDNS socket.
 */

void
isc__nm_tcpdns_stoplistening(isc_nmsocket_t *sock);

void
isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ev0);

void
isc__nm_async_tcpdnssend(isc__networker_t *worker, isc__netievent_t *ev0);

#define isc__nm_uverr2result(x) \
	isc___nm_uverr2result(x, true, __FILE__, __LINE__)
isc_result_t
isc___nm_uverr2result(int uverr, bool dolog, const char *file,
		      unsigned int line);
/*%<
 * Convert a libuv error value into an isc_result_t.  The
 * list of supported error values is not complete; new users
 * of this function should add any expected errors that are
 * not already there.
 */
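
/*
 * Illustrative sketch (not part of the API): mapping a failing libuv
 * call to an isc_result_t; 'example_connection_cb' is hypothetical.
 *
 *	int r = uv_listen(&sock->uv_handle.stream, sock->backlog,
 *			  example_connection_cb);
 *	if (r != 0) {
 *		return (isc__nm_uverr2result(r));
 *	}
 */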

bool
isc__nm_acquire_interlocked(isc_nm_t *mgr);
/*%<
 * Try to acquire interlocked state; return true if successful.
 */

void
isc__nm_drop_interlocked(isc_nm_t *mgr);
/*%<
 * Drop interlocked state; signal waiters.
 */

void
isc__nm_acquire_interlocked_force(isc_nm_t *mgr);
/*%<
 * Actively wait for interlocked state.
 */
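
/*
 * Illustrative sketch (not part of the API): guarding a multi-worker
 * operation (e.g. stopping listening) with the interlock, and backing
 * off if another worker already holds it.
 *
 *	if (!isc__nm_acquire_interlocked(mgr)) {
 *		... re-enqueue the event and try again later ...
 *	}
 *	... perform the operation that needs all workers ...
 *	isc__nm_drop_interlocked(mgr);
 */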

void
isc__nm_incstats(isc_nm_t *mgr, isc_statscounter_t counterid);
/*%<
 * Increment socket-related statistics counters.
 */

void
isc__nm_decstats(isc_nm_t *mgr, isc_statscounter_t counterid);
/*%<
 * Decrement socket-related statistics counters.
 */
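
/*
 * Illustrative sketch (an assumption, not from this header): if
 * 'sock->statsindex' maps the STATID_* slots above to global counter
 * ids, an accept path could bump the accept counter like this.
 *
 *	isc__nm_incstats(sock->mgr, sock->statsindex[STATID_ACCEPT]);
 */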

isc_result_t
isc__nm_socket_freebind(const uv_handle_t *handle);
/*%<
 * Set the IP_FREEBIND (or equivalent) socket option on the uv_handle.
 */