
Improve qp-trie compaction in write transactions

In general, it's better to do one thorough compaction when a batch of
work is complete, which is how `update` transactions work. Conversely,
`write` transactions are designed so that many small transactions are
not too inefficient, but they need explicit compaction. This commit
changes `dns_qp_compact()` so that it is easy to compact at whatever
point makes sense, when there is no better way to schedule compaction.
And `dns_qpmulti_commit()` now recycles garbage only when there is
enough to make it worthwhile.
Commit 0858514ae8 (parent a8b29f0365)
Author: Tony Finch
Date:   2023-02-16 21:42:04 +00:00
Source: https://gitlab.isc.org/isc-projects/bind9
3 changed files with 41 additions and 25 deletions
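To make the intended workflow concrete, here is a minimal sketch of a
caller doing a batch of lightweight write transactions and then
compacting explicitly. It assumes the `dns_qpmulti_write()` entry point
pairs with `dns_qpmulti_commit()` as in the diff below; `insert_one()`
is a hypothetical application helper, not part of this commit.

/*
 * Sketch only: many cheap write transactions, then one explicit
 * cleanup. dns_qpmulti_write() is assumed to open a lightweight
 * transaction; insert_one() is a hypothetical helper.
 */
static void
batch_of_writes(dns_qpmulti_t *multi, size_t count) {
        dns_qp_t *qp = NULL;

        for (size_t i = 0; i < count; i++) {
                dns_qpmulti_write(multi, &qp);
                insert_one(qp, i); /* one small modification */
                dns_qpmulti_commit(multi, &qp);
        }

        /*
         * One thorough compaction when the batch is done;
         * DNS_QPGC_MAYBE makes this a no-op unless the trie is
         * actually fragmented.
         */
        dns_qpmulti_write(multi, &qp);
        dns_qp_compact(qp, DNS_QPGC_MAYBE);
        dns_qpmulti_commit(multi, &qp);
}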

File 1 of 3:

@@ -230,6 +230,15 @@ typedef struct dns_qp_memusage {
         bool fragmented; /*%< trie needs compaction */
 } dns_qp_memusage_t;
 
+/*%
+ * Choice of mode for `dns_qp_compact()`
+ */
+typedef enum dns_qpgc {
+        DNS_QPGC_MAYBE,
+        DNS_QPGC_NOW,
+        DNS_QPGC_ALL,
+} dns_qpgc_t;
+
 /***********************************************************************
  *
  *      functions - create, destory, enquire
@@ -298,23 +307,28 @@ dns_qpmulti_destroy(dns_qpmulti_t **qpmp);
  */
 
 void
-dns_qp_compact(dns_qp_t *qp, bool all);
+dns_qp_compact(dns_qp_t *qp, dns_qpgc_t mode);
 /*%<
  * Defragment the qp-trie and release unused memory.
  *
  * When modifications make a trie too fragmented, it is automatically
  * compacted. However, automatic compaction is limited when a
  * multithreaded trie has lots of immutable memory from past
- * transactions, and lightweight write transactions do not do
+ * transactions, and lightweight write transactions do not compact on
+ * commit like heavyweight update transactions.
  *
  * This function can be used with a single-threaded qp-trie and during a
  * transaction on a multi-threaded trie.
  *
+ * \li If `mode == DNS_QPGC_MAYBE`, the trie is cleaned if it is fragmented
+ *
+ * \li If `mode == DNS_QPGC_NOW`, the trie is cleaned while avoiding
+ * unnecessary work
+ *
+ * \li If `mode == DNS_QPGC_ALL`, the entire trie is compacted
+ *
  * Requires:
  * \li  `qp` is a pointer to a valid qp-trie
- * \li  `all` is true, to compact the whole trie
- * \li  `all` is false, to save time by not compacting
- *      chunk that are not fragmented
  */
 
 void
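Read as a quick reference, the three modes correspond to call sites
like the following sketch; only the semantics documented above are
relied on, and the surrounding setup for `qp` is assumed.

/* Sketch: choosing a dns_qpgc_t mode for an open trie `qp`. */

/* opportunistic: cheap no-op unless the trie is fragmented */
dns_qp_compact(qp, DNS_QPGC_MAYBE);

/* scheduled cleanup: compact now, skipping unnecessary work */
dns_qp_compact(qp, DNS_QPGC_NOW);

/* thorough: compact the entire trie */
dns_qp_compact(qp, DNS_QPGC_ALL);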

File 2 of 3:

@@ -877,9 +877,14 @@ compact(dns_qp_t *qp) {
 }
 
 void
-dns_qp_compact(dns_qp_t *qp, bool all) {
+dns_qp_compact(dns_qp_t *qp, dns_qpgc_t mode) {
        REQUIRE(QP_VALID(qp));
-       qp->compact_all = all;
+       if (mode == DNS_QPGC_MAYBE && !QP_NEEDGC(qp)) {
+               return;
+       }
+       if (mode == DNS_QPGC_ALL) {
+               qp->compact_all = true;
+       }
        compact(qp);
        recycle(qp);
 }
@@ -907,7 +912,7 @@ dns_qp_compact(dns_qp_t *qp, bool all) {
 static inline bool
 squash_twigs(dns_qp_t *qp, qp_ref_t twigs, qp_weight_t size) {
        bool destroyed = free_twigs(qp, twigs, size);
-       if (destroyed && QP_MAX_GARBAGE(qp)) {
+       if (destroyed && QP_AUTOGC(qp)) {
                compact(qp);
                recycle(qp);
                /*
@@ -916,7 +921,7 @@ squash_twigs(dns_qp_t *qp, qp_ref_t twigs, qp_weight_t size) {
                 * time and space, but recovery should be cheaper than
                 * letting compact+recycle fail repeatedly.
                 */
-               if (QP_MAX_GARBAGE(qp)) {
+               if (QP_AUTOGC(qp)) {
                        isc_log_write(dns_lctx, DNS_LOGCATEGORY_DATABASE,
                                      DNS_LOGMODULE_QP, ISC_LOG_NOTICE,
                                      "qp %p uctx \"%s\" compact/recycle "
@@ -947,16 +952,9 @@ dns_qp_memusage(dns_qp_t *qp) {
                .free = qp->free_count,
                .node_size = sizeof(qp_node_t),
                .chunk_size = QP_CHUNK_SIZE,
+               .fragmented = QP_NEEDGC(qp),
        };
 
-       /*
-        * tell the caller about fragmentation of the whole trie
-        * without discounting immutable chunks
-        */
-       qp->hold_count = 0;
-       memusage.fragmented = QP_MAX_GARBAGE(qp);
-       qp->hold_count = memusage.hold;
-
        for (qp_chunk_t chunk = 0; chunk < qp->chunk_max; chunk++) {
                if (qp->base->ptr[chunk] != NULL) {
                        memusage.chunk_count += 1;
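Because `dns_qp_memusage()` now computes the `fragmented` flag with
`QP_NEEDGC()` directly, with no temporary clearing of `hold_count`, a
caller can poll it to schedule compaction. A minimal sketch, assuming
the function returns the struct by value as the hunk above suggests:

/* Sketch: poll the fragmented flag to decide when to compact. */
static void
maybe_compact(dns_qp_t *qp) {
        dns_qp_memusage_t mu = dns_qp_memusage(qp);

        if (mu.fragmented) {
                /*
                 * fragmented == QP_NEEDGC(qp), so this check mirrors
                 * what DNS_QPGC_MAYBE does inside dns_qp_compact().
                 */
                dns_qp_compact(qp, DNS_QPGC_NOW);
        }
}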
@@ -1037,7 +1035,7 @@ transaction_open(dns_qpmulti_t *multi, dns_qp_t **qptp) {
        }
 
        /*
-        * Ensure QP_MAX_GARBAGE() ignores free space in immutable chunks.
+        * Ensure QP_AUTOGC() ignores free space in immutable chunks.
         */
 
        qp->hold_count = qp->free_count;
@@ -1144,15 +1142,15 @@ dns_qpmulti_commit(dns_qpmulti_t *multi, dns_qp_t **qptp) {
                free_twigs(qp, multi->reader_ref, READER_SIZE);
        }
 
-       if (qp->transaction_mode == QP_WRITE) {
-               multi->reader_ref = alloc_twigs(qp, READER_SIZE);
-       } else {
+       if (qp->transaction_mode == QP_UPDATE) {
                /* minimize memory overhead */
                compact(qp);
                multi->reader_ref = alloc_twigs(qp, READER_SIZE);
                qp->base->ptr[qp->bump] = chunk_shrink_raw(
                        qp, qp->base->ptr[qp->bump],
                        qp->usage[qp->bump].used * sizeof(qp_node_t));
+       } else {
+               multi->reader_ref = alloc_twigs(qp, READER_SIZE);
        }
 
        /* anchor a new version of the trie */
@@ -1165,7 +1163,9 @@ dns_qpmulti_commit(dns_qpmulti_t *multi, dns_qp_t **qptp) {
        atomic_store_release(&multi->reader, reader); /* COMMIT */
 
        /* clean up what we can right now */
-       recycle(qp);
+       if (qp->transaction_mode == QP_UPDATE || QP_NEEDGC(qp)) {
+               recycle(qp);
+       }
 
        /* the reclamation phase must be sampled after the commit */
        isc_qsbr_phase_t phase = isc_qsbr_phase(multi->loopmgr);
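Taken together, the two hunks above change what a commit costs
depending on the transaction mode. The following sketch summarizes the
caller-visible behaviour; `dns_qpmulti_update()` and
`dns_qpmulti_write()` are assumed entry points from the surrounding
API, and the modification helpers are elided.

/* Sketch: caller-visible cost of commit in each transaction mode. */
static void
commit_costs(dns_qpmulti_t *multi) {
        dns_qp_t *qp = NULL;

        /* heavyweight: commit always compacts, shrinks the bump
         * chunk, and recycles garbage */
        dns_qpmulti_update(multi, &qp);
        /* ... large batch of modifications ... */
        dns_qpmulti_commit(multi, &qp);

        /* lightweight: commit skips compaction, and recycles only
         * when QP_NEEDGC() says there is enough garbage */
        dns_qpmulti_write(multi, &qp);
        /* ... one small modification ... */
        dns_qpmulti_commit(multi, &qp);
}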

File 3 of 3:

@@ -171,9 +171,11 @@ STATIC_ASSERT(6 <= QP_CHUNK_LOG && QP_CHUNK_LOG <= 20,
  * commits. But they cannot be recovered immediately so they are also
  * counted as on hold, and discounted when we decide whether to compact.
  */
-#define QP_MAX_GARBAGE(qp) \
-       (((qp)->free_count - (qp)->hold_count) > QP_CHUNK_SIZE * 4 && \
-        ((qp)->free_count - (qp)->hold_count) > (qp)->used_count / 2)
+#define QP_GC_HEURISTIC(qp, free) \
+       ((free) > QP_CHUNK_SIZE * 4 && (free) > (qp)->used_count / 2)
+
+#define QP_NEEDGC(qp) QP_GC_HEURISTIC(qp, (qp)->free_count)
+#define QP_AUTOGC(qp) QP_GC_HEURISTIC(qp, (qp)->free_count - (qp)->hold_count)
 
 /*
  * The chunk base and usage arrays are resized geometically and start off
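To see how the hold-count discount separates the two macros, here is a
self-contained worked example with made-up numbers. The chunk size of
1024 nodes is a hypothetical stand-in; the real QP_CHUNK_SIZE is
derived from QP_CHUNK_LOG.

#include <assert.h>
#include <stdbool.h>

#define CHUNK_SIZE 1024 /* hypothetical stand-in for QP_CHUNK_SIZE */

/* restatement of QP_GC_HEURISTIC with plain integers */
static bool
gc_heuristic(unsigned free_nodes, unsigned used_nodes) {
        return free_nodes > CHUNK_SIZE * 4 && free_nodes > used_nodes / 2;
}

int
main(void) {
        unsigned used = 6000, freed = 5000, held = 2000;

        /* QP_NEEDGC: whole-trie view, hold count ignored */
        assert(gc_heuristic(freed, used)); /* 5000 > 4096 && 5000 > 3000 */

        /* QP_AUTOGC: free space in immutable (held) chunks is
         * discounted, so automatic GC does not fire here */
        assert(!gc_heuristic(freed - held, used)); /* 3000 < 4096 */

        return 0;
}

The discounted form is what automatic compaction in squash_twigs()
uses, so held memory from past transactions no longer triggers futile
compaction attempts, while dns_qp_memusage() reports fragmentation of
the whole trie without the discount.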