/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */
#include <inttypes.h>
#include <sched.h> /* IWYU pragma: keep */
#include <setjmp.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define UNIT_TESTING
#include <cmocka.h>

#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/result.h>
#include <isc/string.h>
#include <isc/urcu.h>
#include <isc/util.h>

#include <dns/name.h>
#include <dns/qp.h>

#include "qp_p.h"

#include <tests/dns.h>
#include <tests/qp.h>
ISC_RUN_TEST_IMPL(qpkey_name) {
|
|
|
|
struct {
|
|
|
|
const char *namestr;
|
|
|
|
uint8_t key[512];
|
|
|
|
size_t len;
|
|
|
|
} testcases[] = {
|
2023-09-06 23:57:42 -07:00
|
|
|
{
|
|
|
|
.namestr = "",
|
|
|
|
.key = { 0x02 },
|
|
|
|
.len = 0,
|
|
|
|
},
|
2022-06-14 16:20:28 +01:00
|
|
|
{
|
|
|
|
.namestr = ".",
|
Refactor qp-trie to use QSBR
The first working multi-threaded qp-trie was stuck with an unpleasant
trade-off:
* Use `isc_rwlock`, which has acceptable write performance, but
terrible read scalability because the qp-trie made all accesses
through a single lock.
* Use `liburcu`, which has great read scalability, but terrible
write performance, because I was relying on `rcu_synchronize()`
which is rather slow. And `liburcu` is LGPL.
To get the best of both worlds, we need our own scalable read side,
which we now have with `isc_qsbr`. And we need to modify the write
side so that it is not blocked by readers.
Better write performance requires an async cleanup function like
`call_rcu()`, instead of the blocking `rcu_synchronize()`. (There
is no blocking cleanup in `isc_qsbr`, because I have concluded
that it would be an attractive nuisance.)
Until now, all my multithreading qp-trie designs have been based
around two versions, read-only and mutable. This is too few to
work with asynchronous cleanup. The bare minimum (as in epoch
based reclamation) is three, but it makes more sense to support an
arbitrary number. Doing multi-version support "properly" makes
fewer assumptions about how safe memory reclamation works, and it
makes snapshots and rollbacks simpler.
To avoid making the memory management even more complicated, I
have introduced a new kind of "packed reader node" to anchor the
root of a version of the trie. This is simpler because it re-uses
the existing chunk lifetime logic - see the discussion under
"packed reader nodes" in `qp_p.h`.
I have also made the chunk lifetime logic simpler. The idea of a
"generation" is gone; instead, chunks are either mutable or
immutable. And the QSBR phase number is used to indicate when a
chunk can be reclaimed.
Instead of the `shared_base` flag (which was basically a one-bit
reference count, with a two version limit) the base array now has a
refcount, which replaces the confusing ad-hoc lifetime logic with
something more familiar and systematic.
2022-12-22 14:55:14 +00:00
|
|
|
.key = { 0x02, 0x02 },
|
2022-06-14 16:20:28 +01:00
|
|
|
.len = 1,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.namestr = "\\000",
|
Refactor qp-trie to use QSBR
The first working multi-threaded qp-trie was stuck with an unpleasant
trade-off:
* Use `isc_rwlock`, which has acceptable write performance, but
terrible read scalability because the qp-trie made all accesses
through a single lock.
* Use `liburcu`, which has great read scalability, but terrible
write performance, because I was relying on `rcu_synchronize()`
which is rather slow. And `liburcu` is LGPL.
To get the best of both worlds, we need our own scalable read side,
which we now have with `isc_qsbr`. And we need to modify the write
side so that it is not blocked by readers.
Better write performance requires an async cleanup function like
`call_rcu()`, instead of the blocking `rcu_synchronize()`. (There
is no blocking cleanup in `isc_qsbr`, because I have concluded
that it would be an attractive nuisance.)
Until now, all my multithreading qp-trie designs have been based
around two versions, read-only and mutable. This is too few to
work with asynchronous cleanup. The bare minimum (as in epoch
based reclamation) is three, but it makes more sense to support an
arbitrary number. Doing multi-version support "properly" makes
fewer assumptions about how safe memory reclamation works, and it
makes snapshots and rollbacks simpler.
To avoid making the memory management even more complicated, I
have introduced a new kind of "packed reader node" to anchor the
root of a version of the trie. This is simpler because it re-uses
the existing chunk lifetime logic - see the discussion under
"packed reader nodes" in `qp_p.h`.
I have also made the chunk lifetime logic simpler. The idea of a
"generation" is gone; instead, chunks are either mutable or
immutable. And the QSBR phase number is used to indicate when a
chunk can be reclaimed.
Instead of the `shared_base` flag (which was basically a one-bit
reference count, with a two version limit) the base array now has a
refcount, which replaces the confusing ad-hoc lifetime logic with
something more familiar and systematic.
2022-12-22 14:55:14 +00:00
|
|
|
.key = { 0x03, 0x03, 0x02, 0x02 },
|
2022-06-14 16:20:28 +01:00
|
|
|
.len = 3,
|
|
|
|
},
|
2023-09-06 23:57:42 -07:00
|
|
|
{
|
|
|
|
.namestr = "com",
|
|
|
|
.key = { 0x16, 0x22, 0x20, 0x02 },
|
|
|
|
.len = 4,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.namestr = "com.",
|
|
|
|
.key = { 0x02, 0x16, 0x22, 0x20, 0x02 },
|
|
|
|
.len = 5,
|
|
|
|
},
|
2022-06-14 16:20:28 +01:00
|
|
|
{
|
|
|
|
.namestr = "example.com.",
|
Refactor qp-trie to use QSBR
The first working multi-threaded qp-trie was stuck with an unpleasant
trade-off:
* Use `isc_rwlock`, which has acceptable write performance, but
terrible read scalability because the qp-trie made all accesses
through a single lock.
* Use `liburcu`, which has great read scalability, but terrible
write performance, because I was relying on `rcu_synchronize()`
which is rather slow. And `liburcu` is LGPL.
To get the best of both worlds, we need our own scalable read side,
which we now have with `isc_qsbr`. And we need to modify the write
side so that it is not blocked by readers.
Better write performance requires an async cleanup function like
`call_rcu()`, instead of the blocking `rcu_synchronize()`. (There
is no blocking cleanup in `isc_qsbr`, because I have concluded
that it would be an attractive nuisance.)
Until now, all my multithreading qp-trie designs have been based
around two versions, read-only and mutable. This is too few to
work with asynchronous cleanup. The bare minimum (as in epoch
based reclamation) is three, but it makes more sense to support an
arbitrary number. Doing multi-version support "properly" makes
fewer assumptions about how safe memory reclamation works, and it
makes snapshots and rollbacks simpler.
To avoid making the memory management even more complicated, I
have introduced a new kind of "packed reader node" to anchor the
root of a version of the trie. This is simpler because it re-uses
the existing chunk lifetime logic - see the discussion under
"packed reader nodes" in `qp_p.h`.
I have also made the chunk lifetime logic simpler. The idea of a
"generation" is gone; instead, chunks are either mutable or
immutable. And the QSBR phase number is used to indicate when a
chunk can be reclaimed.
Instead of the `shared_base` flag (which was basically a one-bit
reference count, with a two version limit) the base array now has a
refcount, which replaces the confusing ad-hoc lifetime logic with
something more familiar and systematic.
2022-12-22 14:55:14 +00:00
|
|
|
.key = { 0x02, 0x16, 0x22, 0x20, 0x02, 0x18, 0x2b, 0x14,
|
|
|
|
0x20, 0x23, 0x1f, 0x18, 0x02, 0x02 },
|
2022-06-14 16:20:28 +01:00
|
|
|
.len = 13,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.namestr = "example.com",
|
Refactor qp-trie to use QSBR
The first working multi-threaded qp-trie was stuck with an unpleasant
trade-off:
* Use `isc_rwlock`, which has acceptable write performance, but
terrible read scalability because the qp-trie made all accesses
through a single lock.
* Use `liburcu`, which has great read scalability, but terrible
write performance, because I was relying on `rcu_synchronize()`
which is rather slow. And `liburcu` is LGPL.
To get the best of both worlds, we need our own scalable read side,
which we now have with `isc_qsbr`. And we need to modify the write
side so that it is not blocked by readers.
Better write performance requires an async cleanup function like
`call_rcu()`, instead of the blocking `rcu_synchronize()`. (There
is no blocking cleanup in `isc_qsbr`, because I have concluded
that it would be an attractive nuisance.)
Until now, all my multithreading qp-trie designs have been based
around two versions, read-only and mutable. This is too few to
work with asynchronous cleanup. The bare minimum (as in epoch
based reclamation) is three, but it makes more sense to support an
arbitrary number. Doing multi-version support "properly" makes
fewer assumptions about how safe memory reclamation works, and it
makes snapshots and rollbacks simpler.
To avoid making the memory management even more complicated, I
have introduced a new kind of "packed reader node" to anchor the
root of a version of the trie. This is simpler because it re-uses
the existing chunk lifetime logic - see the discussion under
"packed reader nodes" in `qp_p.h`.
I have also made the chunk lifetime logic simpler. The idea of a
"generation" is gone; instead, chunks are either mutable or
immutable. And the QSBR phase number is used to indicate when a
chunk can be reclaimed.
Instead of the `shared_base` flag (which was basically a one-bit
reference count, with a two version limit) the base array now has a
refcount, which replaces the confusing ad-hoc lifetime logic with
something more familiar and systematic.
2022-12-22 14:55:14 +00:00
|
|
|
.key = { 0x16, 0x22, 0x20, 0x02, 0x18, 0x2b, 0x14, 0x20,
|
|
|
|
0x23, 0x1f, 0x18, 0x02, 0x02 },
|
2022-06-14 16:20:28 +01:00
|
|
|
.len = 12,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.namestr = "EXAMPLE.COM",
|
Refactor qp-trie to use QSBR
The first working multi-threaded qp-trie was stuck with an unpleasant
trade-off:
* Use `isc_rwlock`, which has acceptable write performance, but
terrible read scalability because the qp-trie made all accesses
through a single lock.
* Use `liburcu`, which has great read scalability, but terrible
write performance, because I was relying on `rcu_synchronize()`
which is rather slow. And `liburcu` is LGPL.
To get the best of both worlds, we need our own scalable read side,
which we now have with `isc_qsbr`. And we need to modify the write
side so that it is not blocked by readers.
Better write performance requires an async cleanup function like
`call_rcu()`, instead of the blocking `rcu_synchronize()`. (There
is no blocking cleanup in `isc_qsbr`, because I have concluded
that it would be an attractive nuisance.)
Until now, all my multithreading qp-trie designs have been based
around two versions, read-only and mutable. This is too few to
work with asynchronous cleanup. The bare minimum (as in epoch
based reclamation) is three, but it makes more sense to support an
arbitrary number. Doing multi-version support "properly" makes
fewer assumptions about how safe memory reclamation works, and it
makes snapshots and rollbacks simpler.
To avoid making the memory management even more complicated, I
have introduced a new kind of "packed reader node" to anchor the
root of a version of the trie. This is simpler because it re-uses
the existing chunk lifetime logic - see the discussion under
"packed reader nodes" in `qp_p.h`.
I have also made the chunk lifetime logic simpler. The idea of a
"generation" is gone; instead, chunks are either mutable or
immutable. And the QSBR phase number is used to indicate when a
chunk can be reclaimed.
Instead of the `shared_base` flag (which was basically a one-bit
reference count, with a two version limit) the base array now has a
refcount, which replaces the confusing ad-hoc lifetime logic with
something more familiar and systematic.
2022-12-22 14:55:14 +00:00
|
|
|
.key = { 0x16, 0x22, 0x20, 0x02, 0x18, 0x2b, 0x14, 0x20,
|
|
|
|
0x23, 0x1f, 0x18, 0x02, 0x02 },
|
2022-06-14 16:20:28 +01:00
|
|
|
.len = 12,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
for (size_t i = 0; i < ARRAY_SIZE(testcases); i++) {
|
|
|
|
size_t len;
|
|
|
|
dns_qpkey_t key;
|
|
|
|
dns_fixedname_t fn1, fn2;
|
|
|
|
dns_name_t *in = NULL, *out = NULL;
|
|
|
|
|
2023-09-06 23:57:42 -07:00
|
|
|
in = dns_fixedname_initname(&fn1);
|
|
|
|
if (testcases[i].len != 0) {
|
|
|
|
dns_test_namefromstring(testcases[i].namestr, &fn1);
|
|
|
|
}
|
2022-06-14 16:20:28 +01:00
|
|
|
len = dns_qpkey_fromname(key, in);
|
|
|
|
|
Refactor qp-trie to use QSBR
The first working multi-threaded qp-trie was stuck with an unpleasant
trade-off:
* Use `isc_rwlock`, which has acceptable write performance, but
terrible read scalability because the qp-trie made all accesses
through a single lock.
* Use `liburcu`, which has great read scalability, but terrible
write performance, because I was relying on `rcu_synchronize()`
which is rather slow. And `liburcu` is LGPL.
To get the best of both worlds, we need our own scalable read side,
which we now have with `isc_qsbr`. And we need to modify the write
side so that it is not blocked by readers.
Better write performance requires an async cleanup function like
`call_rcu()`, instead of the blocking `rcu_synchronize()`. (There
is no blocking cleanup in `isc_qsbr`, because I have concluded
that it would be an attractive nuisance.)
Until now, all my multithreading qp-trie designs have been based
around two versions, read-only and mutable. This is too few to
work with asynchronous cleanup. The bare minimum (as in epoch
based reclamation) is three, but it makes more sense to support an
arbitrary number. Doing multi-version support "properly" makes
fewer assumptions about how safe memory reclamation works, and it
makes snapshots and rollbacks simpler.
To avoid making the memory management even more complicated, I
have introduced a new kind of "packed reader node" to anchor the
root of a version of the trie. This is simpler because it re-uses
the existing chunk lifetime logic - see the discussion under
"packed reader nodes" in `qp_p.h`.
I have also made the chunk lifetime logic simpler. The idea of a
"generation" is gone; instead, chunks are either mutable or
immutable. And the QSBR phase number is used to indicate when a
chunk can be reclaimed.
Instead of the `shared_base` flag (which was basically a one-bit
reference count, with a two version limit) the base array now has a
refcount, which replaces the confusing ad-hoc lifetime logic with
something more familiar and systematic.
2022-12-22 14:55:14 +00:00
|
|
|
assert_int_equal(testcases[i].len, len);
|
|
|
|
assert_memory_equal(testcases[i].key, key, len);
|
2023-09-06 23:57:42 -07:00
|
|
|
/* also check key correctness for empty name */
|
|
|
|
if (len == 0) {
|
|
|
|
assert_int_equal(testcases[i].key[0], ((char *)key)[0]);
|
|
|
|
}
|
2022-06-14 16:20:28 +01:00
|
|
|
|
|
|
|
out = dns_fixedname_initname(&fn2);
|
2023-09-06 23:57:42 -07:00
|
|
|
dns_qpkey_toname(key, len, out);
|
2022-06-14 16:20:28 +01:00
|
|
|
assert_true(dns_name_equal(in, out));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ISC_RUN_TEST_IMPL(qpkey_sort) {
|
|
|
|
struct {
|
|
|
|
const char *namestr;
|
|
|
|
dns_name_t *name;
|
|
|
|
dns_fixedname_t fixed;
|
|
|
|
size_t len;
|
|
|
|
dns_qpkey_t key;
|
|
|
|
} testcases[] = {
|
|
|
|
{ .namestr = "." },
|
|
|
|
{ .namestr = "\\000." },
|
|
|
|
{ .namestr = "example.com." },
|
|
|
|
{ .namestr = "EXAMPLE.COM." },
|
|
|
|
{ .namestr = "www.example.com." },
|
|
|
|
{ .namestr = "exam.com." },
|
|
|
|
{ .namestr = "exams.com." },
|
|
|
|
{ .namestr = "exam\\000.com." },
|
|
|
|
};
|
|
|
|
|
|
|
|
for (size_t i = 0; i < ARRAY_SIZE(testcases); i++) {
|
|
|
|
dns_test_namefromstring(testcases[i].namestr,
|
|
|
|
&testcases[i].fixed);
|
|
|
|
testcases[i].name = dns_fixedname_name(&testcases[i].fixed);
|
|
|
|
testcases[i].len = dns_qpkey_fromname(testcases[i].key,
|
|
|
|
testcases[i].name);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 0; i < ARRAY_SIZE(testcases); i++) {
|
|
|
|
for (size_t j = 0; j < ARRAY_SIZE(testcases); j++) {
|
|
|
|
int namecmp = dns_name_compare(testcases[i].name,
|
|
|
|
testcases[j].name);
|
|
|
|
size_t len = ISC_MIN(testcases[i].len,
|
|
|
|
testcases[j].len);
|
|
|
|
/* include extra terminating NOBYTE */
|
|
|
|
int keycmp = memcmp(testcases[i].key, testcases[j].key,
|
|
|
|
len + 1);
|
|
|
|
assert_true((namecmp < 0) == (keycmp < 0));
|
|
|
|
assert_true((namecmp == 0) == (keycmp == 0));
|
|
|
|
assert_true((namecmp > 0) == (keycmp > 0));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
/* number of slots in the qpiter test's item[] array */
#define ITER_ITEMS 100

/*
 * qp-trie leaf callback (attach/detach slots in dns_qpmethods_t):
 * verify that a leaf's value pointer points at the item[] slot
 * selected by its integer value. `uctx` is the item[] array base.
 */
static void
check_leaf(void *uctx, void *pval, uint32_t ival) {
	uint32_t *items = uctx;
	/* slot 0 is reserved as "empty", so valid ivals are 1..ITER_ITEMS-1 */
	assert_in_range(ival, 1, ITER_ITEMS - 1);
	assert_ptr_equal(items + ival, pval);
}
static size_t
|
|
|
|
qpiter_makekey(dns_qpkey_t key, void *uctx, void *pval, uint32_t ival) {
|
|
|
|
check_leaf(uctx, pval, ival);
|
|
|
|
|
|
|
|
char str[8];
|
|
|
|
snprintf(str, sizeof(str), "%03u", ival);
|
|
|
|
|
|
|
|
size_t i = 0;
|
|
|
|
while (str[i] != '\0') {
|
|
|
|
key[i] = str[i] - '0' + SHIFT_BITMAP;
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
key[i++] = SHIFT_NOBYTE;
|
|
|
|
|
|
|
|
return (i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * triename method shared by the test method tables: supply a fixed
 * placeholder name ("test") for diagnostic output; the context is
 * unused.
 */
static void
getname(void *uctx, char *buf, size_t size) {
	strlcpy(buf, "test", size);
	UNUSED(uctx);
	UNUSED(size);
}
const dns_qpmethods_t qpiter_methods = {
|
2023-02-09 14:37:43 +00:00
|
|
|
check_leaf,
|
|
|
|
check_leaf,
|
|
|
|
qpiter_makekey,
|
|
|
|
getname,
|
|
|
|
};
|
|
|
|
|
|
|
|
ISC_RUN_TEST_IMPL(qpiter) {
|
|
|
|
dns_qp_t *qp = NULL;
|
|
|
|
uint32_t item[ITER_ITEMS] = { 0 };
|
|
|
|
|
|
|
|
dns_qp_create(mctx, &qpiter_methods, item, &qp);
|
|
|
|
for (size_t tests = 0; tests < 1234; tests++) {
|
|
|
|
uint32_t ival = isc_random_uniform(ITER_ITEMS - 1) + 1;
|
|
|
|
void *pval = &item[ival];
|
|
|
|
item[ival] = ival;
|
|
|
|
|
|
|
|
/* randomly insert or remove */
|
|
|
|
dns_qpkey_t key;
|
|
|
|
size_t len = qpiter_makekey(key, item, pval, ival);
|
|
|
|
if (dns_qp_insert(qp, pval, ival) == ISC_R_EXISTS) {
|
2023-04-06 11:24:47 +01:00
|
|
|
void *pvald = NULL;
|
|
|
|
uint32_t ivald = 0;
|
|
|
|
dns_qp_deletekey(qp, key, len, &pvald, &ivald);
|
|
|
|
assert_ptr_equal(pval, pvald);
|
|
|
|
assert_int_equal(ival, ivald);
|
2023-02-09 14:37:43 +00:00
|
|
|
item[ival] = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check that we see only valid items in the correct order */
|
|
|
|
uint32_t prev = 0;
|
|
|
|
dns_qpiter_t qpi;
|
|
|
|
dns_qpiter_init(qp, &qpi);
|
|
|
|
while (dns_qpiter_next(&qpi, &pval, &ival) == ISC_R_SUCCESS) {
|
|
|
|
assert_in_range(ival, prev + 1, ITER_ITEMS - 1);
|
|
|
|
assert_int_equal(ival, item[ival]);
|
|
|
|
assert_ptr_equal(pval, &item[ival]);
|
|
|
|
item[ival] = ~ival;
|
|
|
|
prev = ival;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ensure we saw every item */
|
|
|
|
for (ival = 0; ival < ITER_ITEMS; ival++) {
|
|
|
|
if (item[ival] != 0) {
|
|
|
|
assert_int_equal(item[ival], ~ival);
|
|
|
|
item[ival] = ival;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
dns_qp_destroy(&qp);
|
|
|
|
}
|
|
|
|
|
2023-03-10 15:55:00 +00:00
|
|
|
/*
 * Leaf attach/detach method that does nothing, for tests whose leaf
 * values (static strings) need no reference counting.
 */
static void
no_op(void *uctx, void *pval, uint32_t ival) {
	UNUSED(uctx);
	UNUSED(pval);
	UNUSED(ival);
}
static size_t
|
|
|
|
qpkey_fromstring(dns_qpkey_t key, void *uctx, void *pval, uint32_t ival) {
|
|
|
|
dns_fixedname_t fixed;
|
|
|
|
|
|
|
|
UNUSED(uctx);
|
|
|
|
UNUSED(ival);
|
|
|
|
if (*(char *)pval == '\0') {
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
dns_test_namefromstring(pval, &fixed);
|
|
|
|
return (dns_qpkey_fromname(key, dns_fixedname_name(&fixed)));
|
|
|
|
}
|
|
|
|
|
2023-03-10 15:55:00 +00:00
|
|
|
const dns_qpmethods_t string_methods = {
|
2023-02-10 16:53:31 +00:00
|
|
|
no_op,
|
|
|
|
no_op,
|
|
|
|
qpkey_fromstring,
|
|
|
|
getname,
|
|
|
|
};
|
|
|
|
|
|
|
|
struct check_partialmatch {
|
|
|
|
const char *query;
|
|
|
|
dns_qpfind_t options;
|
|
|
|
isc_result_t result;
|
|
|
|
const char *found;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
check_partialmatch(dns_qp_t *qp, struct check_partialmatch check[]) {
|
|
|
|
for (int i = 0; check[i].query != NULL; i++) {
|
|
|
|
isc_result_t result;
|
2023-09-06 23:57:42 -07:00
|
|
|
dns_fixedname_t fn1, fn2;
|
|
|
|
dns_name_t *name = dns_fixedname_initname(&fn1);
|
|
|
|
dns_name_t *foundname = dns_fixedname_initname(&fn2);
|
2023-02-10 16:53:31 +00:00
|
|
|
void *pval = NULL;
|
|
|
|
|
2023-09-06 23:57:42 -07:00
|
|
|
dns_test_namefromstring(check[i].query, &fn1);
|
|
|
|
result = dns_qp_findname_ancestor(qp, name, check[i].options,
|
|
|
|
foundname, &pval, NULL);
|
|
|
|
|
2023-02-10 16:53:31 +00:00
|
|
|
#if 0
|
2023-09-06 23:57:42 -07:00
|
|
|
fprintf(stderr, "%s (flags %u) %s (expected %s) "
|
|
|
|
"value \"%s\" (expected \"%s\")\n",
|
|
|
|
check[i].query, check[i].options,
|
|
|
|
isc_result_totext(result),
|
|
|
|
isc_result_totext(check[i].result), (char *)pval,
|
2023-02-10 16:53:31 +00:00
|
|
|
check[i].found);
|
|
|
|
#endif
|
2023-09-06 23:57:42 -07:00
|
|
|
|
2023-02-10 16:53:31 +00:00
|
|
|
assert_int_equal(result, check[i].result);
|
2023-09-06 23:57:42 -07:00
|
|
|
if (result == ISC_R_SUCCESS) {
|
|
|
|
assert_true(dns_name_equal(name, foundname));
|
|
|
|
} else if (result == DNS_R_PARTIALMATCH) {
|
|
|
|
/*
|
|
|
|
* there are cases where we may have passed a
|
|
|
|
* query name that was relative to the zone apex,
|
|
|
|
* and gotten back an absolute name from the
|
|
|
|
* partial match. it's also possible for an
|
|
|
|
* absolute query to get a partial match on a
|
|
|
|
* node that had an empty name. in these cases,
|
|
|
|
* sanity checking the relations between name
|
|
|
|
* and foundname can trigger an assertion, so
|
|
|
|
* let's just skip them.
|
|
|
|
*/
|
|
|
|
if (dns_name_isabsolute(name) ==
|
|
|
|
dns_name_isabsolute(foundname))
|
|
|
|
{
|
|
|
|
assert_false(dns_name_equal(name, foundname));
|
|
|
|
assert_true(
|
|
|
|
dns_name_issubdomain(name, foundname));
|
|
|
|
}
|
|
|
|
}
|
2023-02-10 16:53:31 +00:00
|
|
|
if (check[i].found == NULL) {
|
|
|
|
assert_null(pval);
|
|
|
|
} else {
|
|
|
|
assert_string_equal(pval, check[i].found);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
insert_str(dns_qp_t *qp, const char *str) {
|
|
|
|
isc_result_t result;
|
|
|
|
uintptr_t pval = (uintptr_t)str;
|
|
|
|
INSIST((pval & 3) == 0);
|
|
|
|
result = dns_qp_insert(qp, (void *)pval, 0);
|
|
|
|
assert_int_equal(result, ISC_R_SUCCESS);
|
|
|
|
}
|
|
|
|
|
|
|
|
ISC_RUN_TEST_IMPL(partialmatch) {
|
|
|
|
isc_result_t result;
|
|
|
|
dns_qp_t *qp = NULL;
|
2023-09-06 23:57:42 -07:00
|
|
|
int i = 0;
|
2023-02-10 16:53:31 +00:00
|
|
|
|
|
|
|
dns_qp_create(mctx, &string_methods, NULL, &qp);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fixed size strings [16] should ensure leaf-compatible alignment.
|
|
|
|
*/
|
|
|
|
const char insert[][16] = {
|
|
|
|
"a.b.", "b.", "fo.bar.", "foo.bar.",
|
|
|
|
"fooo.bar.", "web.foo.bar.", ".", "",
|
|
|
|
};
|
|
|
|
|
2023-09-06 23:57:42 -07:00
|
|
|
/*
|
|
|
|
* omit the root node for now, otherwise we'll get "partial match"
|
|
|
|
* results when we want "not found".
|
|
|
|
*/
|
2023-02-10 16:53:31 +00:00
|
|
|
while (insert[i][0] != '.') {
|
|
|
|
insert_str(qp, insert[i++]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct check_partialmatch check1[] = {
|
|
|
|
{ "a.b.", 0, ISC_R_SUCCESS, "a.b." },
|
|
|
|
{ "a.b.", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH, "b." },
|
|
|
|
{ "b.c.", DNS_QPFIND_NOEXACT, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "bar.", 0, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "f.bar.", 0, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "foo.bar.", 0, ISC_R_SUCCESS, "foo.bar." },
|
|
|
|
{ "foo.bar.", DNS_QPFIND_NOEXACT, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "foooo.bar.", 0, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "w.foo.bar.", 0, DNS_R_PARTIALMATCH, "foo.bar." },
|
|
|
|
{ "www.foo.bar.", 0, DNS_R_PARTIALMATCH, "foo.bar." },
|
|
|
|
{ "web.foo.bar.", 0, ISC_R_SUCCESS, "web.foo.bar." },
|
|
|
|
{ "webby.foo.bar.", 0, DNS_R_PARTIALMATCH, "foo.bar." },
|
|
|
|
{ "my.web.foo.bar.", 0, DNS_R_PARTIALMATCH, "web.foo.bar." },
|
|
|
|
{ "web.foo.bar.", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH,
|
|
|
|
"foo.bar." },
|
|
|
|
{ "my.web.foo.bar.", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH,
|
|
|
|
"web.foo.bar." },
|
2023-04-05 00:36:37 -07:00
|
|
|
{ "my.other.foo.bar.", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH,
|
|
|
|
"foo.bar." },
|
2023-02-10 16:53:31 +00:00
|
|
|
{ NULL, 0, 0, NULL },
|
|
|
|
};
|
|
|
|
check_partialmatch(qp, check1);
|
|
|
|
|
|
|
|
/* what if the trie contains the root? */
|
|
|
|
INSIST(insert[i][0] == '.');
|
|
|
|
insert_str(qp, insert[i++]);
|
|
|
|
|
|
|
|
static struct check_partialmatch check2[] = {
|
|
|
|
{ "b.c.", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH, "." },
|
|
|
|
{ "bar.", 0, DNS_R_PARTIALMATCH, "." },
|
|
|
|
{ "foo.bar.", 0, ISC_R_SUCCESS, "foo.bar." },
|
|
|
|
{ "foo.bar.", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH, "." },
|
2023-09-06 23:57:42 -07:00
|
|
|
{ "bar", 0, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "bar", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH, "." },
|
2023-02-10 16:53:31 +00:00
|
|
|
{ NULL, 0, 0, NULL },
|
|
|
|
};
|
|
|
|
check_partialmatch(qp, check2);
|
|
|
|
|
2023-09-06 23:57:42 -07:00
|
|
|
/*
|
|
|
|
* what if entries in the trie are relative to the zone apex
|
|
|
|
* and there's no root node?
|
|
|
|
*/
|
2023-02-10 16:53:31 +00:00
|
|
|
dns_qpkey_t rootkey = { SHIFT_NOBYTE };
|
2023-04-06 11:24:47 +01:00
|
|
|
result = dns_qp_deletekey(qp, rootkey, 1, NULL, NULL);
|
2023-02-10 16:53:31 +00:00
|
|
|
assert_int_equal(result, ISC_R_SUCCESS);
|
2023-09-06 23:57:42 -07:00
|
|
|
check_partialmatch(
|
|
|
|
qp,
|
|
|
|
(struct check_partialmatch[]){
|
|
|
|
{ "bar", 0, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "bar", DNS_QPFIND_NOEXACT, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "bar.", 0, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ "bar.", DNS_QPFIND_NOEXACT, ISC_R_NOTFOUND, NULL },
|
|
|
|
{ NULL, 0, 0, NULL },
|
|
|
|
});
|
|
|
|
|
|
|
|
/* what if there's a root node with an empty key? */
|
2023-02-10 16:53:31 +00:00
|
|
|
INSIST(insert[i][0] == '\0');
|
|
|
|
insert_str(qp, insert[i++]);
|
2023-09-06 23:57:42 -07:00
|
|
|
check_partialmatch(
|
|
|
|
qp,
|
|
|
|
(struct check_partialmatch[]){
|
|
|
|
{ "bar", 0, DNS_R_PARTIALMATCH, "" },
|
|
|
|
{ "bar", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH, "" },
|
|
|
|
{ "bar.", 0, DNS_R_PARTIALMATCH, "" },
|
|
|
|
{ "bar.", DNS_QPFIND_NOEXACT, DNS_R_PARTIALMATCH, "" },
|
|
|
|
{ NULL, 0, 0, NULL },
|
|
|
|
});
|
2023-02-10 16:53:31 +00:00
|
|
|
|
|
|
|
dns_qp_destroy(&qp);
|
|
|
|
}
|
|
|
|
|
2022-06-14 16:20:28 +01:00
|
|
|
ISC_TEST_LIST_START
|
|
|
|
ISC_TEST_ENTRY(qpkey_name)
|
|
|
|
ISC_TEST_ENTRY(qpkey_sort)
|
2023-02-09 14:37:43 +00:00
|
|
|
ISC_TEST_ENTRY(qpiter)
|
2023-02-10 16:53:31 +00:00
|
|
|
ISC_TEST_ENTRY(partialmatch)
|
2022-06-14 16:20:28 +01:00
|
|
|
ISC_TEST_LIST_END
|
|
|
|
|
|
|
|
ISC_TEST_MAIN
|