mirror of
https://gitlab.isc.org/isc-projects/bind9
synced 2025-08-29 05:28:00 +00:00
We introduce an isc_quota_attach_cb function - if ISC_R_QUOTA is returned at the time the function is called, then a callback will be called when there's quota available (with the quota already attached). The callbacks are organized as a LIFO queue in the quota structure. This is needed for the TCP client quota - with the old networking code we had one single place where the TCP client quota was processed, so we could resume accepting when we had spare slots, but that is gone with netmgr - now we need to notify the listener/acceptor that there's quota available so that it can resume accepting. Remove the unused isc_quota_force() function. The isc_quota_reserve and isc_quota_release functions were used only internally in quota.c and the tests; we should not expose API we are not using.
171 lines
4.1 KiB
C
171 lines
4.1 KiB
C
/*
|
|
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
|
|
*
|
|
* This Source Code Form is subject to the terms of the Mozilla Public
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
*
|
|
* See the COPYRIGHT file distributed with this work for additional
|
|
* information regarding copyright ownership.
|
|
*/
|
|
|
|
/*! \file */
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <isc/atomic.h>
|
|
#include <isc/quota.h>
|
|
#include <isc/util.h>
|
|
|
|
void
|
|
isc_quota_init(isc_quota_t *quota, unsigned int max) {
|
|
atomic_init("a->max, max);
|
|
atomic_init("a->used, 0);
|
|
atomic_init("a->soft, 0);
|
|
atomic_init("a->waiting, 0);
|
|
ISC_LIST_INIT(quota->cbs);
|
|
isc_mutex_init("a->cblock);
|
|
}
|
|
|
|
void
|
|
isc_quota_destroy(isc_quota_t *quota) {
|
|
INSIST(atomic_load("a->used) == 0);
|
|
INSIST(atomic_load("a->waiting) == 0);
|
|
INSIST(ISC_LIST_EMPTY(quota->cbs));
|
|
atomic_store_release("a->max, 0);
|
|
atomic_store_release("a->used, 0);
|
|
atomic_store_release("a->soft, 0);
|
|
isc_mutex_destroy("a->cblock);
|
|
}
|
|
|
|
void
|
|
isc_quota_soft(isc_quota_t *quota, unsigned int soft) {
|
|
atomic_store_release("a->soft, soft);
|
|
}
|
|
|
|
void
|
|
isc_quota_max(isc_quota_t *quota, unsigned int max) {
|
|
atomic_store_release("a->max, max);
|
|
}
|
|
|
|
unsigned int
|
|
isc_quota_getmax(isc_quota_t *quota) {
|
|
return (atomic_load_relaxed("a->max));
|
|
}
|
|
|
|
unsigned int
|
|
isc_quota_getsoft(isc_quota_t *quota) {
|
|
return (atomic_load_relaxed("a->soft));
|
|
}
|
|
|
|
unsigned int
|
|
isc_quota_getused(isc_quota_t *quota) {
|
|
return (atomic_load_relaxed("a->used));
|
|
}
|
|
|
|
static isc_result_t
|
|
quota_reserve(isc_quota_t *quota) {
|
|
isc_result_t result;
|
|
uint_fast32_t max = atomic_load_acquire("a->max);
|
|
uint_fast32_t soft = atomic_load_acquire("a->soft);
|
|
uint_fast32_t used = atomic_load_acquire("a->used);
|
|
do {
|
|
if (max != 0 && used >= max) {
|
|
return (ISC_R_QUOTA);
|
|
}
|
|
if (soft != 0 && used >= soft) {
|
|
result = ISC_R_SOFTQUOTA;
|
|
} else {
|
|
result = ISC_R_SUCCESS;
|
|
}
|
|
} while (!atomic_compare_exchange_weak_acq_rel("a->used, &used,
|
|
used + 1));
|
|
return (result);
|
|
}
|
|
|
|
/* Must be quota->cbslock locked */
|
|
static void
|
|
enqueue(isc_quota_t *quota, isc_quota_cb_t *cb) {
|
|
REQUIRE(cb != NULL);
|
|
ISC_LIST_ENQUEUE(quota->cbs, cb, link);
|
|
atomic_fetch_add_release("a->waiting, 1);
|
|
}
|
|
|
|
/* Must be quota->cbslock locked */
|
|
static isc_quota_cb_t *
|
|
dequeue(isc_quota_t *quota) {
|
|
isc_quota_cb_t *cb = ISC_LIST_HEAD(quota->cbs);
|
|
INSIST(cb != NULL);
|
|
ISC_LIST_DEQUEUE(quota->cbs, cb, link);
|
|
atomic_fetch_sub_relaxed("a->waiting, 1);
|
|
return (cb);
|
|
}
|
|
|
|
static void
|
|
quota_release(isc_quota_t *quota) {
|
|
/*
|
|
* This is opportunistic - we might race with a failing quota_attach_cb
|
|
* and not detect that something is waiting, but eventually someone will
|
|
* be releasing quota and will detect it, so we don't need to worry -
|
|
* and we're saving a lot by not locking cblock every time.
|
|
*/
|
|
|
|
if (atomic_load_acquire("a->waiting) > 0) {
|
|
isc_quota_cb_t *cb = NULL;
|
|
LOCK("a->cblock);
|
|
if (atomic_load_relaxed("a->waiting) > 0) {
|
|
cb = dequeue(quota);
|
|
}
|
|
UNLOCK("a->cblock);
|
|
if (cb != NULL) {
|
|
cb->cb_func(quota, cb->data);
|
|
return;
|
|
}
|
|
}
|
|
|
|
INSIST(atomic_fetch_sub_release("a->used, 1) > 0);
|
|
}
|
|
|
|
/*
 * Reserve a slot and, on (soft-)success, attach the quota to '*p'.
 * '*p' must be NULL on entry; it is left untouched on ISC_R_QUOTA.
 */
static isc_result_t
doattach(isc_quota_t *quota, isc_quota_t **p) {
	isc_result_t result;
	REQUIRE(p != NULL && *p == NULL);

	result = quota_reserve(quota);
	if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) {
		*p = quota;
	}

	return (result);
}
|
|
|
|
/*
 * Attach to a quota (no callback variant): equivalent to
 * isc_quota_attach_cb() with a NULL callback, so ISC_R_QUOTA is
 * returned immediately when the hard limit is reached.
 */
isc_result_t
isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) {
	return (isc_quota_attach_cb(quota, p, NULL));
}
|
|
|
|
isc_result_t
|
|
isc_quota_attach_cb(isc_quota_t *quota, isc_quota_t **p, isc_quota_cb_t *cb) {
|
|
isc_result_t result = doattach(quota, p);
|
|
if (result == ISC_R_QUOTA && cb != NULL) {
|
|
LOCK("a->cblock);
|
|
enqueue(quota, cb);
|
|
UNLOCK("a->cblock);
|
|
}
|
|
return (result);
|
|
}
|
|
|
|
/*
 * Prepare a callback object for use with isc_quota_attach_cb():
 * initialize its queue link and record the function/argument pair
 * to run when quota becomes available.
 */
void
isc_quota_cb_init(isc_quota_cb_t *cb, isc_quota_cb_func_t cb_func, void *data) {
	ISC_LINK_INIT(cb, link);
	cb->cb_func = cb_func;
	cb->data = data;
}
|
|
|
|
/*
 * Detach from a quota, releasing the slot (possibly handing it to a
 * queued waiter) and NULLing the caller's pointer.
 */
void
isc_quota_detach(isc_quota_t **p) {
	INSIST(p != NULL && *p != NULL);
	quota_release(*p);
	*p = NULL;
}
|