sync code with last improvements from OpenBSD

This commit is contained in:
purplerain 2023-09-06 22:21:59 +00:00
parent 30d14db1d3
commit 0c904fa153
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
235 changed files with 12410 additions and 6193 deletions

@@ -488,6 +488,27 @@ static int daemon_get_shufport(struct daemon* daemon, int* shufport)
	return avail;
}
/**
 * Clear and delete per-worker alloc caches, and free memory maintained in
 * superalloc.
 * The rrset and message caches must be empty at the time of call.
 * @param daemon: the daemon that maintains the alloc caches to be cleared.
 */
static void
daemon_clear_allocs(struct daemon* daemon)
{
	int i;
	for(i=0; i<daemon->num; i++) {
		alloc_clear(daemon->worker_allocs[i]);
		free(daemon->worker_allocs[i]);
	}
	free(daemon->worker_allocs);
	daemon->worker_allocs = NULL;
	alloc_clear_special(&daemon->superalloc);
}
/**
 * Allocate empty worker structures. With backptr and thread-number,
 * from 0..numthread initialised. Used as user arguments to new threads.
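Editor's note on the hunk above: the teardown order in daemon_clear_allocs() (drain each per-worker cache, free the array, then clear the shared superalloc) can be pictured with a small standalone model. The structs and helpers below are hypothetical stand-ins, not Unbound's real alloc API; only the ordering mirrors the function above.

/* Standalone sketch (hypothetical names): per-worker caches borrow from a
 * shared pool, so they are drained and freed before the pool itself is
 * cleared, mirroring the order used by daemon_clear_allocs() above. */
#include <stdio.h>
#include <stdlib.h>

struct pool { int blocks; };                      /* stand-in for superalloc */
struct wcache { struct pool* parent; int held; }; /* stand-in for alloc_cache */

static void wcache_clear(struct wcache* c)
{
	c->parent->blocks += c->held; /* hand cached blocks back to the pool */
	c->held = 0;
}

static void clear_allocs(struct wcache** caches, int num, struct pool* p)
{
	int i;
	for(i=0; i<num; i++) {
		wcache_clear(caches[i]);
		free(caches[i]);
	}
	free(caches);
	p->blocks = 0; /* clear the shared pool only after no cache uses it */
}

int main(void)
{
	struct pool p = {0};
	struct wcache** caches = calloc(4, sizeof(struct wcache*));
	int i;
	if(!caches) return 1;
	for(i=0; i<4; i++) {
		caches[i] = calloc(1, sizeof(struct wcache));
		if(!caches[i]) return 1;
		caches[i]->parent = &p;
		caches[i]->held = 2;
	}
	clear_allocs(caches, 4, &p);
	printf("blocks left in pool: %d\n", p.blocks);
	return 0;
}

Run standalone, this prints "blocks left in pool: 0", i.e. nothing is left dangling once the pool is cleared last.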
@@ -540,6 +561,21 @@ daemon_create_workers(struct daemon* daemon)
			/* the above is not ports/numthr, due to rounding */
			fatal_exit("could not create worker");
	}
	/* create per-worker alloc caches if not reusing existing ones. */
	if(!daemon->worker_allocs) {
		daemon->worker_allocs = (struct alloc_cache**)calloc(
			(size_t)daemon->num, sizeof(struct alloc_cache*));
		if(!daemon->worker_allocs)
			fatal_exit("could not allocate worker allocs");
		for(i=0; i<daemon->num; i++) {
			struct alloc_cache* alloc = calloc(1,
				sizeof(struct alloc_cache));
			if (!alloc)
				fatal_exit("could not allocate worker alloc");
			alloc_init(alloc, &daemon->superalloc, i);
			daemon->worker_allocs[i] = alloc;
		}
	}
	free(shufport);
}
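The if(!daemon->worker_allocs) guard in the hunk above is what makes reuse across reloads possible: on a fresh start the array of per-worker caches is built, while on a reload where daemon_cleanup() decided to keep the caches the existing allocators stay in place and the entries that reference them remain valid. A rough sketch of that create-if-absent pattern follows; the reload loop and all names are hypothetical, only the guard mirrors the code above.

/* Sketch of the create-if-absent pattern across reloads (hypothetical names);
 * only the "create when the array is missing" guard mirrors the change above. */
#include <stdio.h>
#include <stdlib.h>

struct state { int** per_worker; int num; };

static void create_workers(struct state* s)
{
	int i;
	if(!s->per_worker) { /* fresh start, or caches were dropped in cleanup */
		s->per_worker = calloc((size_t)s->num, sizeof(int*));
		if(!s->per_worker) exit(1);
		for(i=0; i<s->num; i++) {
			s->per_worker[i] = calloc(1, sizeof(int));
			if(!s->per_worker[i]) exit(1);
		}
		printf("created %d per-worker caches\n", s->num);
	} else {
		printf("reusing existing per-worker caches\n");
	}
}

int main(void)
{
	struct state s = { NULL, 4 };
	create_workers(&s); /* initial start: allocates the array */
	create_workers(&s); /* reload with cache reuse: keeps the old array */
	return 0;
}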
@@ -771,6 +807,7 @@ daemon_fork(struct daemon* daemon)
	/* Shutdown SHM */
	shm_main_shutdown(daemon);
	daemon->reuse_cache = daemon->workers[0]->reuse_cache;
	daemon->need_to_exit = daemon->workers[0]->need_to_exit;
}
@@ -785,9 +822,16 @@ daemon_cleanup(struct daemon* daemon)
	log_thread_set(NULL);
	/* clean up caches because
	 * a) RRset IDs will be recycled after a reload, causing collisions
	 * b) validation config can change, thus rrset, msg, keycache clear
	 *
	 * If we are trying to keep the cache as long as possible, we should
	 * defer the cleanup until we know whether the new configuration allows
	 * the reuse. (If we're exiting, cleanup should be done here). */
	if(!daemon->reuse_cache || daemon->need_to_exit) {
		slabhash_clear(&daemon->env->rrset_cache->table);
		slabhash_clear(daemon->env->msg_cache);
	}
	daemon->old_num = daemon->num; /* save the current num */
	local_zones_delete(daemon->local_zones);
	daemon->local_zones = NULL;
	respip_set_delete(daemon->respip_set);
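Taken together with the reuse check later in daemon_apply_cfg(), the cleanup in the hunk above implements a two-stage decision: daemon_cleanup() clears the RRset and message caches immediately only when the caches are not being reused or the daemon is exiting, and otherwise defers to the reload path, which drops everything if the thread count or cache geometry changed. The sketch below condenses that flow into two predicates; the function names and the config struct are hypothetical, it only checks total sizes (the real code also compares slab counts via slabhash_is_size).

/* Condensed sketch of the two-stage keep-or-clear decision (hypothetical
 * names); the predicates mirror daemon_cleanup() and daemon_apply_cfg(). */
#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

struct cfg { int threads; size_t msg_cache_size; size_t rrset_cache_size; };

/* Stage 1: at cleanup time, keep caches only if reuse was requested and we
 * are not shutting down. */
static bool keep_at_cleanup(bool reuse_cache, bool need_to_exit)
{
	return reuse_cache && !need_to_exit;
}

/* Stage 2: after the new config is read, reuse survives only if the thread
 * count and cache geometry are unchanged. */
static bool keep_after_reload(const struct cfg* oldc, const struct cfg* newc)
{
	return newc->threads == oldc->threads &&
		newc->msg_cache_size == oldc->msg_cache_size &&
		newc->rrset_cache_size == oldc->rrset_cache_size;
}

int main(void)
{
	struct cfg oldc = { 4, 4*1024*1024, 4*1024*1024 };
	struct cfg newc = { 4, 4*1024*1024, 8*1024*1024 };
	bool keep = keep_at_cleanup(true, false) &&
		keep_after_reload(&oldc, &newc);
	printf("caches %s\n", keep ? "kept across reload" : "cleared");
	return 0;
}

With the sample values above (RRset cache grown on reload) the sketch prints "caches cleared", matching the log_warn path in the last hunk of this commit.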
@@ -802,8 +846,13 @@ daemon_cleanup(struct daemon* daemon)
		worker_delete(daemon->workers[i]);
	free(daemon->workers);
	daemon->workers = NULL;
	/* Unless we're trying to keep the cache, worker alloc_caches should be
	 * cleared and freed here. We do this after deleting workers to
	 * guarantee that the alloc caches are valid throughout the lifetime
	 * of workers. */
	if(!daemon->reuse_cache || daemon->need_to_exit)
		daemon_clear_allocs(daemon);
	daemon->num = 0;
	alloc_clear_special(&daemon->superalloc);
#ifdef USE_DNSTAP
	dt_delete(daemon->dtenv);
	daemon->dtenv = NULL;
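The comment in the hunk above pins down an ordering constraint: workers are deleted first, and only afterwards are the per-worker alloc caches freed (and only when the cache is not being kept), so no worker can touch a freed allocator during its own teardown. A minimal illustration of that "destroy the users before their allocator" ordering, with hypothetical names:

/* Minimal illustration (hypothetical names): delete the users of an
 * allocator before the allocator itself, as daemon_cleanup() does for
 * workers and their alloc caches. */
#include <stdio.h>
#include <stdlib.h>

struct allocator { int live; };
struct user { struct allocator* a; };

static void user_delete(struct user* u)
{
	/* a user being deleted may still legitimately touch its allocator */
	if(u->a->live) printf("user returned resources to allocator\n");
	free(u);
}

int main(void)
{
	struct allocator* a = calloc(1, sizeof(*a));
	struct user* u = calloc(1, sizeof(*u));
	if(!a || !u) return 1;
	a->live = 1;
	u->a = a;
	user_delete(u); /* delete the user first ... */
	free(a);        /* ... then its allocator, never the other way round */
	return 0;
}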
@@ -900,8 +949,42 @@ daemon_delete(struct daemon* daemon)
void daemon_apply_cfg(struct daemon* daemon, struct config_file* cfg)
{
	int new_num = cfg->num_threads?cfg->num_threads:1;
	daemon->cfg = cfg;
	config_apply(cfg);
	/* If this is a reload and we deferred the decision on whether to
	 * reuse the alloc, RRset, and message caches, then check to see if
	 * it's safe to keep the caches:
	 * - changing the number of threads is obviously incompatible with
	 *   keeping the per-thread alloc caches. It also means we have to
	 *   clear RRset and message caches. (note that 'new_num' may be
	 *   adjusted in daemon_create_workers, but for our purpose we can
	 *   simply compare it with 'old_num'; if they are equal here,
	 *   'new_num' won't be adjusted to a different value than 'old_num').
	 * - changing RRset cache size effectively clears any remaining cache
	 *   entries. We could keep their keys in alloc caches, but it would
	 *   be more consistent with the sense of the change to clear allocs
	 *   and free memory. To do so we also have to clear message cache.
	 * - only changing message cache size does not necessarily affect
	 *   RRset or alloc cache. But almost all new subsequent queries will
	 *   require recursive resolution anyway, so it doesn't help much to
	 *   just keep RRset and alloc caches. For simplicity we clear/free
	 *   the other two, too. */
	if(daemon->worker_allocs &&
		(new_num != daemon->old_num ||
		!slabhash_is_size(daemon->env->msg_cache, cfg->msg_cache_size,
			cfg->msg_cache_slabs) ||
		!slabhash_is_size(&daemon->env->rrset_cache->table,
			cfg->rrset_cache_size, cfg->rrset_cache_slabs)))
	{
		log_warn("cannot reuse caches due to critical config change");
		slabhash_clear(&daemon->env->rrset_cache->table);
		slabhash_clear(daemon->env->msg_cache);
		daemon_clear_allocs(daemon);
	}
	if(!slabhash_is_size(daemon->env->msg_cache, cfg->msg_cache_size,
		cfg->msg_cache_slabs)) {
		slabhash_delete(daemon->env->msg_cache);