
refactor tcpquota and pipeline refs; allow special-case overrun in isc_quota

- if the TCP quota has been exceeded but there are no clients listening
  for new connections on the interface, we can now force attachment to the
  quota using isc_quota_force(), instead of carrying on with the quota not
  attached.
- the TCP client quota is now referenced via a reference-counted
  'ns_tcpconn' object, one of which is created whenever a client begins
  listening for new connections, and attached to by members of that
  client's pipeline group. when the last reference to the tcpconn
  object is detached, it is freed and the TCP quota slot is released.
- reduce code duplication by adding mark_tcp_active() function
- convert counters to stdatomic

(cherry picked from commit a8dd133d270873b736c1be9bf50ebaa074f5b38f)
(cherry picked from commit 4a8fc979c4)
Authored by Evan Hunt on 2019-02-06 11:27:11 -08:00
Committed by Ondřej Surý
Parent: a0f4a3fa65
Commit: 2f3876d187
7 changed files with 235 additions and 269 deletions
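
Before the per-file diffs, the semantics being introduced are worth stating plainly: isc_quota_attach() fails once the hard limit is reached, while the new isc_quota_force() attaches anyway, leaving the usage counter above the limit until a later isc_quota_detach() releases it. The self-contained sketch below models those semantics with C11 atomics; quota_t and its functions are illustrative stand-ins invented for this example, not the ISC implementation (which appears in the second file of the diff).

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	atomic_uint used;
	unsigned int max;
} quota_t;

/* Normal attach fails once 'used' has reached 'max'; a forced
 * attach takes the slot anyway and may overrun the hard limit. */
static bool
quota_attach(quota_t *q, bool force)
{
	unsigned int prev = atomic_fetch_add(&q->used, 1);
	if (prev >= q->max && !force) {
		atomic_fetch_sub(&q->used, 1); /* roll back */
		return (false);
	}
	return (true);
}

static void
quota_detach(quota_t *q)
{
	unsigned int prev = atomic_fetch_sub(&q->used, 1);
	assert(prev > 0); /* mirrors the INSIST() in isc_quota_release() */
}

int
main(void)
{
	quota_t q = { .max = 1 };
	atomic_init(&q.used, 0);

	assert(quota_attach(&q, false));  /* takes the only slot */
	assert(!quota_attach(&q, false)); /* hard quota: refused */
	assert(quota_attach(&q, true));   /* forced: overruns the limit */
	printf("used=%u max=%u\n", atomic_load(&q.used), q.max);

	quota_detach(&q);
	quota_detach(&q);
	return (0);
}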

View File

@@ -115,6 +115,13 @@ isc_quota_attach(isc_quota_t *quota, isc_quota_t **p);
  * quota if successful (ISC_R_SUCCESS or ISC_R_SOFTQUOTA).
  */
 
+isc_result_t
+isc_quota_force(isc_quota_t *quota, isc_quota_t **p);
+/*%<
+ * Like isc_quota_attach, but will attach '*p' to the quota
+ * even if the hard quota has been exceeded.
+ */
+
 void
 isc_quota_detach(isc_quota_t **p);
 /*%<

View File

@@ -83,20 +83,36 @@ isc_quota_release(isc_quota_t *quota) {
 	INSIST(atomic_fetch_sub(&quota->used, 1) > 0);
 }
 
-isc_result_t
-isc_quota_attach(isc_quota_t *quota, isc_quota_t **p)
-{
+static isc_result_t
+doattach(isc_quota_t *quota, isc_quota_t **p, bool force) {
 	isc_result_t result;
-	INSIST(p != NULL && *p == NULL);
+
+	REQUIRE(p != NULL && *p == NULL);
+
 	result = isc_quota_reserve(quota);
-	if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA)
+	if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) {
 		*p = quota;
+	} else if (result == ISC_R_QUOTA && force) {
+		/* attach anyway */
+		atomic_fetch_add(&quota->used, 1);
+		*p = quota;
+		result = ISC_R_SUCCESS;
+	}
+
 	return (result);
 }
 
+isc_result_t
+isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) {
+	return (doattach(quota, p, false));
+}
+
+isc_result_t
+isc_quota_force(isc_quota_t *quota, isc_quota_t **p) {
+	return (doattach(quota, p, true));
+}
+
 void
-isc_quota_detach(isc_quota_t **p)
-{
+isc_quota_detach(isc_quota_t **p) {
 	INSIST(p != NULL && *p != NULL);
 	isc_quota_release(*p);
 	*p = NULL;

View File

@@ -449,6 +449,7 @@ isc_portset_removerange
 isc_quota_attach
 isc_quota_destroy
 isc_quota_detach
+isc_quota_force
 isc_quota_getmax
 isc_quota_getsoft
 isc_quota_getused

View File

@@ -239,8 +239,7 @@ static void ns_client_endrequest(ns_client_t *client);
 static void client_start(isc_task_t *task, isc_event_t *event);
 static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
 static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
-			       dns_dispatch_t *disp, ns_client_t *oldclient,
-			       bool tcp);
+			       dns_dispatch_t *disp, bool tcp);
 static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
 			       isc_socket_t *sock, ns_client_t *oldclient);
 static void compute_cookie(ns_client_t *client, uint32_t when,
@@ -318,16 +317,32 @@ read_settimeout(ns_client_t *client, bool newconn) {
 }
 
 /*%
- * Allocate a reference counter that will track the number of client structures
- * using the TCP connection that 'client' called accept() for. This counter
- * will be shared between all client structures associated with this TCP
- * connection.
+ * Allocate a reference-counted object that will maintain a single pointer to
+ * the (also reference-counted) TCP client quota, shared between all the
+ * clients processing queries on a single TCP connection, so that all
+ * clients sharing the one socket will together consume only one slot in
+ * the 'tcp-clients' quota.
  */
-static void
-pipeline_init(ns_client_t *client) {
-	isc_refcount_t *refs;
+static isc_result_t
+tcpconn_init(ns_client_t *client, bool force) {
+	isc_result_t result;
+	isc_quota_t *quota = NULL;
+	ns_tcpconn_t *tconn = NULL;
 
-	REQUIRE(client->pipeline_refs == NULL);
+	REQUIRE(client->tcpconn == NULL);
+
+	/*
+	 * Try to attach to the quota first, so we won't pointlessly
+	 * allocate memory for a tcpconn object if we can't get one.
+	 */
+	if (force) {
+		result = isc_quota_force(&client->sctx->tcpquota, &quota);
+	} else {
+		result = isc_quota_attach(&client->sctx->tcpquota, &quota);
+	}
+	if (result != ISC_R_SUCCESS) {
+		return (result);
+	}
 
 	/*
 	 * A global memory context is used for the allocation as different
@@ -337,79 +352,82 @@ pipeline_init(ns_client_t *client) {
 	 * contention here is expected to be negligible, given that this code
 	 * is only executed for TCP connections.
 	 */
-	refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
-	isc_refcount_init(refs, 1);
-	client->pipeline_refs = refs;
+	tconn = isc_mem_allocate(client->sctx->mctx, sizeof(*tconn));
+
+	isc_refcount_init(&tconn->refs, 1);
+	tconn->tcpquota = quota;
+	quota = NULL;
+	tconn->pipelined = false;
+
+	client->tcpconn = tconn;
+
+	return (ISC_R_SUCCESS);
 }
 
 /*%
- * Increase the count of client structures using the TCP connection that
- * 'source' is associated with and put a pointer to that count in 'target',
- * thus associating it with the same TCP connection.
+ * Increase the count of client structures sharing the TCP connection
+ * that 'source' is associated with; add a pointer to the same tcpconn
+ * to 'target', thus associating it with the same TCP connection.
  */
 static void
-pipeline_attach(ns_client_t *source, ns_client_t *target) {
+tcpconn_attach(ns_client_t *source, ns_client_t *target) {
 	int old_refs;
 
-	REQUIRE(source->pipeline_refs != NULL);
-	REQUIRE(target->pipeline_refs == NULL);
+	REQUIRE(source->tcpconn != NULL);
+	REQUIRE(target->tcpconn == NULL);
+	REQUIRE(source->tcpconn->pipelined);
 
-	old_refs = isc_refcount_increment(source->pipeline_refs);
+	old_refs = isc_refcount_increment(&source->tcpconn->refs);
 	INSIST(old_refs > 0);
-	target->pipeline_refs = source->pipeline_refs;
+	target->tcpconn = source->tcpconn;
 }
 
 /*%
- * Decrease the count of client structures using the TCP connection that
+ * Decrease the count of client structures sharing the TCP connection that
  * 'client' is associated with. If this is the last client using this TCP
- * connection, free the reference counter and return true; otherwise, return
- * false.
+ * connection, we detach from the TCP quota and free the tcpconn
+ * object. Either way, client->tcpconn is set to NULL.
  */
-static bool
-pipeline_detach(ns_client_t *client) {
-	isc_refcount_t *refs;
+static void
+tcpconn_detach(ns_client_t *client) {
+	ns_tcpconn_t *tconn = NULL;
 	int old_refs;
 
-	REQUIRE(client->pipeline_refs != NULL);
+	REQUIRE(client->tcpconn != NULL);
 
-	refs = client->pipeline_refs;
-	client->pipeline_refs = NULL;
+	tconn = client->tcpconn;
+	client->tcpconn = NULL;
 
-	old_refs = isc_refcount_decrement(refs);
+	old_refs = isc_refcount_decrement(&tconn->refs);
 	INSIST(old_refs > 0);
 
 	if (old_refs == 1) {
-		isc_mem_free(client->sctx->mctx, refs);
-		return (true);
+		isc_quota_detach(&tconn->tcpquota);
+		isc_mem_free(client->sctx->mctx, tconn);
 	}
-
-	return (false);
 }
 
-/*
- * Detach a client from the TCP client quota if appropriate, and set
- * the quota pointer to NULL.
- *
- * Sometimes when the TCP client quota is exhausted but there are no other
- * clients servicing the interface, a client will be allowed to continue
- * running despite not having been attached to the quota. In this event,
- * the TCP quota was never attached to the client, so when the client (or
- * associated pipeline group) shuts down, the quota must NOT be detached.
- *
- * Otherwise, if the quota pointer is set, it should be detached. If not
- * set at all, we just return without doing anything.
+/*%
+ * Mark a client as active and increment the interface's 'ntcpactive'
+ * counter, as a signal that there is at least one client servicing
+ * TCP queries for the interface. If we reach the TCP client quota at
+ * some point, this will be used to determine whether a quota overrun
+ * should be permitted.
+ *
+ * Marking the client active with the 'tcpactive' flag ensures proper
+ * accounting, by preventing us from incrementing or decrementing
+ * 'ntcpactive' more than once per client.
  */
 static void
-tcpquota_disconnect(ns_client_t *client) {
-	if (client->tcpquota == NULL) {
-		return;
-	}
-
-	if (client->tcpattached) {
-		isc_quota_detach(&client->tcpquota);
-		client->tcpattached = false;
-	} else {
-		client->tcpquota = NULL;
+mark_tcp_active(ns_client_t *client, bool active) {
+	if (active && !client->tcpactive) {
+		atomic_fetch_add(&client->interface->ntcpactive, 1);
+		client->tcpactive = active;
+	} else if (!active && client->tcpactive) {
+		uint32_t old =
+			atomic_fetch_sub(&client->interface->ntcpactive, 1);
+		INSIST(old > 0);
+		client->tcpactive = active;
 	}
 }
@@ -502,7 +520,8 @@ exit_check(ns_client_t *client) {
 		INSIST(client->recursionquota == NULL);
 
 		if (NS_CLIENTSTATE_READING == client->newstate) {
-			if (!client->pipelined) {
+			INSIST(client->tcpconn != NULL);
+			if (!client->tcpconn->pipelined) {
 				client_read(client, false);
 				client->newstate = NS_CLIENTSTATE_MAX;
 				return (true); /* We're done. */
@@ -525,8 +544,8 @@ exit_check(ns_client_t *client) {
 			dns_tcpmsg_cancelread(&client->tcpmsg);
 		}
 
-		if (client->nreads != 0) {
-			/* Still waiting for read cancel completion. */
+		/* Still waiting for read cancel completion. */
+		if (client->nreads > 0) {
 			return (true);
 		}
@@ -536,43 +555,45 @@ exit_check(ns_client_t *client) {
 		}
 
 		/*
-		 * Detach from pipeline group and from TCP client quota,
-		 * if appropriate.
-		 *
-		 * - If no pipeline group is active, attempt to
-		 *   detach from the TCP client quota.
-		 *
-		 * - If a pipeline group is active, detach from it;
-		 *   if the return code indicates that there no more
-		 *   clients left if this pipeline group, we also detach
-		 *   from the TCP client quota.
-		 *
-		 * - Otherwise we don't try to detach, we just set the
-		 *   TCP quota pointer to NULL if it wasn't NULL already.
-		 *
-		 * tcpquota_disconnect() will set tcpquota to NULL, either
-		 * by detaching it or by assignment, depending on the
-		 * needs of the client. See the comments on that function
-		 * for further information.
+		 * Soon the client will be ready to accept a new TCP
+		 * connection or UDP request, but we may have enough
+		 * clients doing that already. Check whether this client
+		 * needs to remain active and allow it go inactive if
+		 * not.
+		 *
+		 * UDP clients always go inactive at this point, but a TCP
+		 * client may need to stay active and return to READY
+		 * state if no other clients are available to listen
+		 * for TCP requests on this interface.
+		 *
+		 * Regardless, if we're going to FREED state, that means
+		 * the system is shutting down and we don't need to
+		 * retain clients.
 		 */
-		if (client->pipeline_refs == NULL || pipeline_detach(client)) {
-			tcpquota_disconnect(client);
-		} else {
-			client->tcpquota = NULL;
-			client->tcpattached = false;
+		if (client->mortal && TCP_CLIENT(client) &&
+		    client->newstate != NS_CLIENTSTATE_FREED &&
+		    (client->sctx->options & NS_SERVER_CLIENTTEST) == 0 &&
+		    atomic_load(&client->interface->ntcpaccepting) == 0)
+		{
+			/* Nobody else is accepting */
+			client->mortal = false;
+			client->newstate = NS_CLIENTSTATE_READY;
+		}
+
+		/*
+		 * Detach from TCP connection and TCP client quota,
+		 * if appropriate. If this is the last reference to
+		 * the TCP connection in our pipeline group, the
+		 * TCP quota slot will be released.
+		 */
+		if (client->tcpconn) {
+			tcpconn_detach(client);
 		}
 
 		if (client->tcpsocket != NULL) {
 			CTRACE("closetcp");
 			isc_socket_detach(&client->tcpsocket);
-
-			if (client->tcpactive) {
-				LOCK(&client->interface->lock);
-				INSIST(client->interface->ntcpactive > 0);
-				client->interface->ntcpactive--;
-				UNLOCK(&client->interface->lock);
-				client->tcpactive = false;
-			}
+			mark_tcp_active(client, false);
 		}
 
 		if (client->timerset) {
@@ -585,37 +606,6 @@ exit_check(ns_client_t *client) {
 		client->peeraddr_valid = false;
 
 		client->state = NS_CLIENTSTATE_READY;
-		INSIST(client->recursionquota == NULL);
-
-		/*
-		 * Now the client is ready to accept a new TCP connection
-		 * or UDP request, but we may have enough clients doing
-		 * that already. Check whether this client needs to remain
-		 * active and force it to go inactive if not.
-		 *
-		 * UDP clients go inactive at this point, but a TCP client
-		 * may need to remain active and go into ready state if
-		 * no other clients are available to listen for TCP
-		 * requests on this interface or (in the case of pipelined
-		 * clients) to read for additional messages on the current
-		 * connection.
-		 */
-		if (client->mortal && TCP_CLIENT(client) &&
-		    ((client->sctx->options & NS_SERVER_CLIENTTEST) == 0))
-		{
-			LOCK(&client->interface->lock);
-			if ((client->interface->ntcpaccepting == 0 ||
-			    (client->pipelined &&
-			     client->interface->ntcpactive < 2)) &&
-			    client->newstate != NS_CLIENTSTATE_FREED)
-			{
-				client->mortal = false;
-				client->newstate = NS_CLIENTSTATE_READY;
-			}
-			UNLOCK(&client->interface->lock);
-		}
-
-		client->pipelined = false;
 
 		/*
 		 * We don't need the client; send it to the inactive
@@ -650,7 +640,7 @@ exit_check(ns_client_t *client) {
 	}
 
 	/* Still waiting for accept cancel completion. */
-	if (! (client->naccepts == 0)) {
+	if (client->naccepts > 0) {
 		return (true);
 	}
@@ -661,7 +651,7 @@ exit_check(ns_client_t *client) {
 	}
 
 	/* Still waiting for recv cancel completion. */
-	if (! (client->nrecvs == 0)) {
+	if (client->nrecvs > 0) {
 		return (true);
 	}
@@ -674,14 +664,7 @@ exit_check(ns_client_t *client) {
 	INSIST(client->recursionquota == NULL);
 	if (client->tcplistener != NULL) {
 		isc_socket_detach(&client->tcplistener);
-
-		if (client->tcpactive) {
-			LOCK(&client->interface->lock);
-			INSIST(client->interface->ntcpactive > 0);
-			client->interface->ntcpactive--;
-			UNLOCK(&client->interface->lock);
-			client->tcpactive = false;
-		}
+		mark_tcp_active(client, false);
 	}
 	if (client->udpsocket != NULL) {
 		isc_socket_detach(&client->udpsocket);
@@ -843,7 +826,7 @@ client_start(isc_task_t *task, isc_event_t *event) {
 		return;
 
 	if (TCP_CLIENT(client)) {
-		if (client->pipelined) {
+		if (client->tcpconn != NULL) {
 			client_read(client, false);
 		} else {
 			client_accept(client);
@@ -2450,6 +2433,7 @@ ns__client_request(isc_task_t *task, isc_event_t *event) {
 		client->nrecvs--;
 	} else {
 		INSIST(TCP_CLIENT(client));
+		INSIST(client->tcpconn != NULL);
 		REQUIRE(event->ev_type == DNS_EVENT_TCPMSG);
 		REQUIRE(event->ev_sender == &client->tcpmsg);
 		buffer = &client->tcpmsg.buffer;
@@ -2638,17 +2622,19 @@ ns__client_request(isc_task_t *task, isc_event_t *event) {
 	/*
 	 * Pipeline TCP query processing.
 	 */
-	if (client->message->opcode != dns_opcode_query) {
-		client->pipelined = false;
+	if (TCP_CLIENT(client) &&
+	    client->message->opcode != dns_opcode_query)
+	{
+		client->tcpconn->pipelined = false;
 	}
-	if (TCP_CLIENT(client) && client->pipelined) {
+	if (TCP_CLIENT(client) && client->tcpconn->pipelined) {
 		/*
 		 * We're pipelining. Replace the client; the
-		 * the replacement can read the TCP socket looking
-		 * for new messages and this client can process the
+		 * replacement can read the TCP socket looking
+		 * for new messages and this one can process the
 		 * current message asynchronously.
 		 *
-		 * There are now at least three clients using this
+		 * There will now be at least three clients using this
 		 * TCP socket - one accepting new connections,
 		 * one reading an existing connection to get new
 		 * messages, and one answering the message already
@@ -2656,7 +2642,7 @@ ns__client_request(isc_task_t *task, isc_event_t *event) {
 		 */
 		result = ns_client_replace(client);
 		if (result != ISC_R_SUCCESS) {
-			client->pipelined = false;
+			client->tcpconn->pipelined = false;
 		}
 	}
@@ -3210,10 +3196,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
 	dns_name_init(&client->signername, NULL);
 	client->mortal = false;
 	client->sendcb = NULL;
-	client->pipelined = false;
-	client->pipeline_refs = NULL;
-	client->tcpquota = NULL;
-	client->tcpattached = false;
+	client->tcpconn = NULL;
 	client->recursionquota = NULL;
 	client->interface = NULL;
 	client->peeraddr_valid = false;
@@ -3318,10 +3301,11 @@ client_read(ns_client_t *client, bool newconn) {
 
 static void
 client_newconn(isc_task_t *task, isc_event_t *event) {
+	isc_result_t result;
 	ns_client_t *client = event->ev_arg;
 	isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
 	dns_aclenv_t *env = ns_interfacemgr_getaclenv(client->interface->mgr);
-	isc_result_t result;
+	uint32_t old;
 
 	REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
 	REQUIRE(NS_CLIENT_VALID(client));
@@ -3341,10 +3325,8 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
 	INSIST(client->naccepts == 1);
 	client->naccepts--;
 
-	LOCK(&client->interface->lock);
-	INSIST(client->interface->ntcpaccepting > 0);
-	client->interface->ntcpaccepting--;
-	UNLOCK(&client->interface->lock);
+	old = atomic_fetch_sub(&client->interface->ntcpaccepting, 1);
+	INSIST(old > 0);
 
 	/*
 	 * We must take ownership of the new socket before the exit
@@ -3377,7 +3359,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
 			      NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
 			      "accept failed: %s",
 			      isc_result_totext(nevent->result));
-		tcpquota_disconnect(client);
+		tcpconn_detach(client);
 	}
 
 	if (exit_check(client))
@@ -3413,15 +3395,13 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
 		 * telnetting to port 53 (once per CPU) will
 		 * deny service to legitimate TCP clients.
 		 */
-		client->pipelined = false;
 		result = ns_client_replace(client);
 		if (result == ISC_R_SUCCESS &&
 		    (client->sctx->keepresporder == NULL ||
 		     !dns_acl_allowed(&netaddr, NULL,
 				      client->sctx->keepresporder, env)))
 		{
-			pipeline_init(client);
-			client->pipelined = true;
+			client->tcpconn->pipelined = true;
 		}
 
 		client_read(client, true);
@@ -3438,78 +3418,59 @@ client_accept(ns_client_t *client) {
 	CTRACE("accept");
 
 	/*
-	 * The tcpquota object can only be simultaneously referenced a
-	 * pre-defined number of times; this is configured by 'tcp-clients'
-	 * in named.conf. If we can't attach to it here, that means the TCP
-	 * client quota has been exceeded.
+	 * Set up a new TCP connection. This means try to attach to the
+	 * TCP client quota (tcp-clients), but fail if we're over quota.
 	 */
-	result = isc_quota_attach(&client->sctx->tcpquota,
-				  &client->tcpquota);
+	result = tcpconn_init(client, false);
 	if (result != ISC_R_SUCCESS) {
 		bool exit;
 
 		ns_client_log(client, NS_LOGCATEGORY_CLIENT,
-			      NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
-			      "no more TCP clients: %s",
+			      NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
+			      "TCP client quota reached: %s",
 			      isc_result_totext(result));
 
 		/*
-		 * We have exceeded the system-wide TCP client
-		 * quota. But, we can't just block this accept
-		 * in all cases, because if we did, a heavy TCP
-		 * load on other interfaces might cause this
-		 * interface to be starved, with no clients able
-		 * to accept new connections.
-		 *
-		 * So, we check here to see if any other clients
-		 * are already servicing TCP queries on this
-		 * interface (whether accepting, reading, or
-		 * processing). If there are at least two
-		 * (one reading and one processing a request)
-		 * then it's okay *not* to call accept - we
-		 * can let this client go inactive and another
-		 * one will resume accepting when it's done.
-		 *
-		 * If there aren't enough active clients on the
-		 * interface, then we can be a little bit
-		 * flexible about the quota. We'll allow *one*
-		 * extra client through to ensure we're listening
-		 * on every interface.
-		 *
-		 * (Note: In practice this means that the real
-		 * TCP client quota is tcp-clients plus the
-		 * number of listening interfaces plus 2.)
+		 * We have exceeded the system-wide TCP client quota. But,
+		 * we can't just block this accept in all cases, because if
+		 * we did, a heavy TCP load on other interfaces might cause
+		 * this interface to be starved, with no clients able to
+		 * accept new connections.
+		 *
+		 * So, we check here to see if any other clients are
+		 * already servicing TCP queries on this interface (whether
+		 * accepting, reading, or processing). If we find at least
+		 * one, then it's okay *not* to call accept - we can let this
+		 * client go inactive and another will take over when it's
+		 * done.
+		 *
+		 * If there aren't enough active clients on the interface,
+		 * then we can be a little bit flexible about the quota.
+		 * We'll allow *one* extra client through to ensure we're
+		 * listening on every interface; we do this by setting the
+		 * 'force' option to tcpconn_init().
+		 *
+		 * (Note: In practice this means that the real TCP client
+		 * quota is tcp-clients plus the number of listening
+		 * interfaces plus 1.)
 		 */
-		LOCK(&client->interface->lock);
-		exit = (client->interface->ntcpactive > 1);
-		UNLOCK(&client->interface->lock);
-
+		exit = (atomic_load(&client->interface->ntcpactive) > 0U);
 		if (exit) {
 			client->newstate = NS_CLIENTSTATE_INACTIVE;
 			(void)exit_check(client);
 			return;
 		}
-	} else {
-		client->tcpattached = true;
+
+		result = tcpconn_init(client, true);
+		RUNTIME_CHECK(result == ISC_R_SUCCESS);
 	}
 
 	/*
-	 * By incrementing the interface's ntcpactive counter we signal
-	 * that there is at least one client servicing TCP queries for the
-	 * interface.
-	 *
-	 * We also make note of the fact in the client itself with the
-	 * tcpactive flag. This ensures proper accounting by preventing
-	 * us from accidentally incrementing or decrementing ntcpactive
-	 * more than once per client object.
+	 * If this client was set up using get_client() or get_worker(),
+	 * then TCP is already marked active. However, if it was restarted
+	 * from exit_check(), it might not be, so we take care of it now.
 	 */
-	if (!client->tcpactive) {
-		LOCK(&client->interface->lock);
-		client->interface->ntcpactive++;
-		UNLOCK(&client->interface->lock);
-		client->tcpactive = true;
-	}
+	mark_tcp_active(client, true);
 
 	result = isc_socket_accept(client->tcplistener, client->task,
 				   client_newconn, client);
@@ -3525,15 +3486,8 @@ client_accept(ns_client_t *client) {
 			      "isc_socket_accept() failed: %s",
 			      isc_result_totext(result));
 
-		tcpquota_disconnect(client);
-
-		if (client->tcpactive) {
-			LOCK(&client->interface->lock);
-			client->interface->ntcpactive--;
-			UNLOCK(&client->interface->lock);
-			client->tcpactive = false;
-		}
+		tcpconn_detach(client);
+		mark_tcp_active(client, false);
 		return;
 	}
@@ -3558,9 +3512,7 @@ client_accept(ns_client_t *client) {
 	 * listening for connections itself to prevent the interface
 	 * going dead.
 	 */
-	LOCK(&client->interface->lock);
-	client->interface->ntcpaccepting++;
-	UNLOCK(&client->interface->lock);
+	atomic_fetch_add(&client->interface->ntcpaccepting, 1);
 }
 
 static void
@@ -3631,24 +3583,25 @@ ns_client_replace(ns_client_t *client) {
 	REQUIRE(client->manager != NULL);
 
 	tcp = TCP_CLIENT(client);
-	if (tcp && client->pipelined) {
+	if (tcp && client->tcpconn != NULL && client->tcpconn->pipelined) {
 		result = get_worker(client->manager, client->interface,
 				    client->tcpsocket, client);
 	} else {
 		result = get_client(client->manager, client->interface,
-				    client->dispatch, client, tcp);
+				    client->dispatch, tcp);
+
+		/*
+		 * The responsibility for listening for new requests is hereby
+		 * transferred to the new client. Therefore, the old client
+		 * should refrain from listening for any more requests.
+		 */
+		client->mortal = true;
 	}
 	if (result != ISC_R_SUCCESS) {
 		return (result);
 	}
 
-	/*
-	 * The responsibility for listening for new requests is hereby
-	 * transferred to the new client. Therefore, the old client
-	 * should refrain from listening for any more requests.
-	 */
-	client->mortal = true;
-
 	return (ISC_R_SUCCESS);
 }
@@ -3789,7 +3742,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
 
 static isc_result_t
 get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
-	   dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
+	   dns_dispatch_t *disp, bool tcp)
 {
 	isc_result_t result = ISC_R_SUCCESS;
 	isc_event_t *ev;
@@ -3836,15 +3789,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
 	client->rcode_override = -1;	/* not set */
 
 	if (tcp) {
-		client->tcpattached = false;
-		if (oldclient != NULL) {
-			client->tcpattached = oldclient->tcpattached;
-		}
-
-		LOCK(&client->interface->lock);
-		client->interface->ntcpactive++;
-		UNLOCK(&client->interface->lock);
-		client->tcpactive = true;
+		mark_tcp_active(client, true);
 
 		client->attributes |= NS_CLIENTATTR_TCP;
 		isc_socket_attach(ifp->tcpsocket,
@@ -3910,8 +3855,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
 	client->newstate = client->state = NS_CLIENTSTATE_WORKING;
 	INSIST(client->recursionquota == NULL);
 	client->sctx = manager->sctx;
-	client->tcpquota = &client->sctx->tcpquota;
-	client->tcpattached = oldclient->tcpattached;
 
 	client->dscp = ifp->dscp;
@@ -3920,8 +3863,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
 	client->sendcb = NULL;
 	client->rcode_override = -1;	/* not set */
 
-	pipeline_attach(oldclient, client);
-	client->pipelined = true;
+	tcpconn_attach(oldclient, client);
+	mark_tcp_active(client, true);
 
 	isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
 	isc_socket_attach(sock, &client->tcpsocket);
@@ -3929,11 +3872,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
 	(void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
 	client->peeraddr_valid = true;
 
-	LOCK(&client->interface->lock);
-	client->interface->ntcpactive++;
-	UNLOCK(&client->interface->lock);
-	client->tcpactive = true;
-
 	INSIST(client->tcpmsg_valid == false);
 	dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
 	client->tcpmsg_valid = true;
@@ -4008,8 +3946,7 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
 	MTRACE("createclients");
 
 	for (disp = 0; disp < n; disp++) {
-		result = get_client(manager, ifp, ifp->udpdispatch[disp],
-				    NULL, tcp);
+		result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
 		if (result != ISC_R_SUCCESS)
 			break;
 	}

View File

@@ -80,6 +80,13 @@
  *** Types
  ***/
 
+/*% reference-counted TCP connection object */
+typedef struct ns_tcpconn {
+	isc_refcount_t		refs;
+	isc_quota_t		*tcpquota;
+	bool			pipelined;
+} ns_tcpconn_t;
+
 /*% nameserver client structure */
 struct ns_client {
 	unsigned int		magic;
@@ -134,10 +141,7 @@ struct ns_client {
 	dns_name_t		signername;   /*%< [T]SIG key name */
 	dns_name_t		*signer;      /*%< NULL if not valid sig */
 	bool			mortal;	      /*%< Die after handling request */
-	bool			pipelined;   /*%< TCP queries not in sequence */
-	isc_refcount_t		*pipeline_refs;
-	isc_quota_t		*tcpquota;
-	bool			tcpattached;
+	ns_tcpconn_t		*tcpconn;
 	isc_quota_t		*recursionquota;
 	ns_interface_t		*interface;
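
The ns_tcpconn_t added above is a plain refcounted wrapper around the quota pointer. A toy model of its lifecycle may clarify the commit message's claim that the quota slot is released only with the last reference; the names conn_new/conn_attach/conn_detach are invented for illustration, standing in for tcpconn_init/tcpconn_attach/tcpconn_detach in the client.c diff above.

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

static atomic_int quota_used; /* simulated 'tcp-clients' quota */

/* Toy version of ns_tcpconn_t: one quota slot per connection,
 * shared by every client on the same TCP socket. */
typedef struct {
	atomic_int refs;
} tcpconn_t;

static tcpconn_t *
conn_new(void)
{
	tcpconn_t *t = malloc(sizeof(*t));
	atomic_init(&t->refs, 1);
	atomic_fetch_add(&quota_used, 1); /* take one quota slot */
	return (t);
}

static void
conn_attach(tcpconn_t *t)
{
	int old = atomic_fetch_add(&t->refs, 1);
	assert(old > 0);
}

static void
conn_detach(tcpconn_t **tp)
{
	tcpconn_t *t = *tp;
	int old;

	*tp = NULL;
	old = atomic_fetch_sub(&t->refs, 1);
	assert(old > 0);
	if (old == 1) {
		/* last reference: release the slot, free the object */
		atomic_fetch_sub(&quota_used, 1);
		free(t);
	}
}

int
main(void)
{
	tcpconn_t *a = conn_new(); /* accepting client */
	tcpconn_t *b = a;

	conn_attach(b);  /* pipelined worker shares the connection */
	conn_detach(&a); /* quota slot still held by b */
	assert(atomic_load(&quota_used) == 1);
	conn_detach(&b); /* last reference: slot released */
	assert(atomic_load(&quota_used) == 0);
	return (0);
}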

View File

@@ -76,11 +76,11 @@ struct ns_interface {
 						/*%< UDP dispatchers. */
 	isc_socket_t *		tcpsocket;	/*%< TCP socket. */
 	isc_dscp_t		dscp;		/*%< "listen-on" DSCP value */
-	int			ntcpaccepting;	/*%< Number of clients
+	atomic_uint_fast32_t	ntcpaccepting;	/*%< Number of clients
 						     ready to accept new
 						     TCP connections on this
 						     interface */
-	int			ntcpactive;	/*%< Number of clients
+	atomic_uint_fast32_t	ntcpactive;	/*%< Number of clients
 						     servicing TCP queries
 						     (whether accepting or
 						     connected) */

View File

@@ -423,8 +423,9 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
 	 * connections will be handled in parallel even though there is
 	 * only one client initially.
 	 */
-	ifp->ntcpaccepting = 0;
-	ifp->ntcpactive = 0;
+	atomic_init(&ifp->ntcpaccepting, 0);
+	atomic_init(&ifp->ntcpactive, 0);
+
 	ifp->nudpdispatch = 0;
 	ifp->dscp = -1;
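
The final two files convert the per-interface counters from lock-protected ints to C11 atomics. The decrement paths in the client code rely on atomic_fetch_sub() returning the counter's previous value, so INSIST(old > 0) catches underflow without taking a lock. A minimal standalone illustration of the same pattern (the names here are placeholders, not ISC code):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static atomic_uint_fast32_t ntcpactive; /* stand-in for ifp->ntcpactive */

static void
tcp_active_inc(void)
{
	atomic_fetch_add(&ntcpactive, 1);
}

static void
tcp_active_dec(void)
{
	/*
	 * fetch_sub returns the value *before* the subtraction,
	 * so 'old > 0' proves the counter did not underflow.
	 */
	uint_fast32_t old = atomic_fetch_sub(&ntcpactive, 1);
	assert(old > 0);
}

int
main(void)
{
	atomic_init(&ntcpactive, 0);
	tcp_active_inc();
	tcp_active_dec();
	return (0);
}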