Mirror of https://gitlab.isc.org/isc-projects/bind9, synced 2025-08-23 02:28:55 +00:00
Use detected cache line size
The IBM POWER architecture has an L1 cache line size of 128 bytes. Take advantage of that on that architecture instead of forcing the more common value of 64: when a larger cache line size can be detected at runtime, use the detected value. The default remains 64.
parent bff7dbeef9
commit f00f521e9c
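For illustration, a minimal standalone sketch of the detection strategy the patch uses: query the L1 data cache line size with POSIX sysconf() and fall back to 64 bytes when it cannot be determined. The names detect_cacheline() and DEFAULT_CACHELINE_SIZE are illustrative, not part of the BIND 9 tree:

#include <stdio.h>
#include <unistd.h>

#define DEFAULT_CACHELINE_SIZE 64 /* same fallback as ISC_OS_CACHELINE_SIZE */

/* Return the L1 data cache line size reported by the OS, or the default
 * when sysconf() is unavailable or reports nothing useful (<= 0). */
static unsigned long
detect_cacheline(void) {
	unsigned long line = DEFAULT_CACHELINE_SIZE;
#if defined(_SC_LEVEL1_DCACHE_LINESIZE)
	long s = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
	if (s > 0 && (unsigned long)s > line) {
		line = (unsigned long)s;
	}
#endif
	return (line);
}

int
main(void) {
	printf("L1 cache line: %lu bytes\n", detect_cacheline());
	return (0);
}

On ppc64le this typically prints 128, which is why padding to a constant 64 can still leave two objects on one physical cache line.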
@@ -111,7 +111,7 @@ isc_hp_new(isc_mem_t *mctx, size_t max_hps, isc_hp_deletefunc_t *deletefunc) {
 	isc_hp_uintptr_t *hps;
 
 	hps = isc_mem_get_aligned(mctx, hp->max_hps * sizeof(*hps),
-				  ISC_OS_CACHELINE_SIZE);
+				  isc_os_cacheline());
 	for (int j = 0; j < hp->max_hps; j++) {
 		atomic_init(&hps[j], 0);
 	}
@@ -124,8 +124,7 @@ isc_hp_new(isc_mem_t *mctx, size_t max_hps, isc_hp_deletefunc_t *deletefunc) {
 	for (int i = 0; i < isc__hp_max_threads; i++) {
 		retirelist_t *rl;
 
-		rl = isc_mem_get_aligned(mctx, sizeof(*rl),
-					 ISC_OS_CACHELINE_SIZE);
+		rl = isc_mem_get_aligned(mctx, sizeof(*rl), isc_os_cacheline());
 		rl->size = 0;
 		rl->list = isc_mem_get(hp->mctx,
 				       hp->max_retired * sizeof(uintptr_t));
@@ -149,12 +148,12 @@ isc_hp_destroy(isc_hp_t *hp) {
 		isc_mem_put(hp->mctx, rl->list,
 			    hp->max_retired * sizeof(uintptr_t));
 		isc_mem_put_aligned(hp->mctx, rl, sizeof(*rl),
-				    ISC_OS_CACHELINE_SIZE);
+				    isc_os_cacheline());
 	}
 	for (int i = 0; i < isc__hp_max_threads; i++) {
 		isc_hp_uintptr_t *hps = hp->hp[i];
 		isc_mem_put_aligned(hp->mctx, hps, hp->max_hps * sizeof(*hps),
-				    ISC_OS_CACHELINE_SIZE);
+				    isc_os_cacheline());
 	}
 	isc_mem_put(hp->mctx, hp->hp, isc__hp_max_threads * sizeof(hp->hp[0]));
 	isc_mem_put(hp->mctx, hp->rl, isc__hp_max_threads * sizeof(hp->rl[0]));
@@ -34,4 +34,12 @@ isc_os_ncpus(void);
  * be determined.
  */
 
+unsigned long
+isc_os_cacheline(void);
+/*%<
+ * Return the L1 cache line size of the CPU. If it is larger than
+ * ISC_OS_CACHELINE_SIZE, the detected value is used instead of the
+ * constant; this is common on the ppc64le architecture.
+ */
+
 ISC_LANG_ENDDECLS
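The point of exposing the detected size is cache-line alignment: objects touched by different threads are aligned and spaced to whole cache lines so concurrent updates do not falsely share a line. A minimal sketch of that idea, not from the BIND 9 tree, assuming only C11 <stdatomic.h> and aligned_alloc(); counters_new() and counter_at() are illustrative names:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

/* Place one counter per cache line: elements are laid out "cacheline" bytes
 * apart, so threads updating neighbouring counters never contend on the
 * same line (no false sharing). */
static atomic_uint_fast64_t *
counter_at(void *base, size_t cacheline, size_t i) {
	return ((atomic_uint_fast64_t *)((char *)base + i * cacheline));
}

static void *
counters_new(size_t nthreads, size_t cacheline) {
	/* aligned_alloc() needs a size that is a multiple of the alignment;
	 * nthreads * cacheline satisfies that by construction. */
	void *base = aligned_alloc(cacheline, nthreads * cacheline);
	if (base != NULL) {
		memset(base, 0, nthreads * cacheline);
		for (size_t i = 0; i < nthreads; i++) {
			atomic_init(counter_at(base, cacheline, i), 0);
		}
	}
	return (base);
}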
@@ -459,7 +459,7 @@ mem_create(isc_mem_t **ctxp, unsigned int flags) {
 
 	REQUIRE(ctxp != NULL && *ctxp == NULL);
 
-	ctx = mallocx(sizeof(*ctx), MALLOCX_ALIGN(ISC_OS_CACHELINE_SIZE));
+	ctx = mallocx(sizeof(*ctx), MALLOCX_ALIGN(isc_os_cacheline()));
 	INSIST(ctx != NULL);
 
 	*ctx = (isc_mem_t){
@@ -578,7 +578,7 @@ destroy(isc_mem_t *ctx) {
 	if (ctx->checkfree) {
 		INSIST(malloced == 0);
 	}
-	sdallocx(ctx, sizeof(*ctx), MALLOCX_ALIGN(ISC_OS_CACHELINE_SIZE));
+	sdallocx(ctx, sizeof(*ctx), MALLOCX_ALIGN(isc_os_cacheline()));
 }
 
 void

lib/isc/os.c
@@ -20,6 +20,7 @@
 #include "os_p.h"
 
 static unsigned int isc__os_ncpus = 0;
+static unsigned long isc__os_cacheline = ISC_OS_CACHELINE_SIZE;
 
 #ifdef HAVE_SYSCONF
 
@@ -76,12 +77,19 @@ isc_os_ncpus(void) {
 	return (isc__os_ncpus);
 }
 
+unsigned long
+isc_os_cacheline(void) {
+	return (isc__os_cacheline);
+}
+
 void
 isc__os_initialize(void) {
 	ncpus_initialize();
 #if defined(HAVE_SYSCONF) && defined(_SC_LEVEL1_DCACHE_LINESIZE)
 	long s = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
-	RUNTIME_CHECK((size_t)s == (size_t)ISC_OS_CACHELINE_SIZE || s <= 0);
+	if (s > 0 && (unsigned long)s > isc__os_cacheline) {
+		isc__os_cacheline = s;
+	}
 #endif
 }
 
@@ -94,8 +94,7 @@ isc_queue_new(isc_mem_t *mctx) {
 	isc_queue_t *queue = NULL;
 	node_t *sentinel = NULL;
 
-	queue = isc_mem_get_aligned(mctx, sizeof(*queue),
-				    ISC_OS_CACHELINE_SIZE);
+	queue = isc_mem_get_aligned(mctx, sizeof(*queue), isc_os_cacheline());
 
 	*queue = (isc_queue_t){ 0 };
 
@@ -211,5 +210,5 @@ isc_queue_destroy(isc_queue_t *queue) {
 	isc_hp_destroy(queue->hp);
 
 	isc_mem_putanddetach_aligned(&queue->mctx, queue, sizeof(*queue),
-				     ISC_OS_CACHELINE_SIZE);
+				     isc_os_cacheline());
 }