sync with OpenBSD -current

This commit is contained in:
purplerain 2024-02-06 19:52:24 +00:00
parent 0bc0a510b3
commit 593fd57b5d
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
61 changed files with 797 additions and 428 deletions

View file

@ -1,4 +1,4 @@
# $OpenBSD: Makefile,v 1.6 2021/12/28 15:45:17 patrick Exp $
# $OpenBSD: Makefile,v 1.8 2024/02/06 05:07:28 jca Exp $
.include <bsd.own.mk>
@ -21,6 +21,8 @@ CPPFLAGS+= -DVISIBILITY_HIDDEN
RTARCH= x86_64
.elif ${MACHINE_ARCH} == "powerpc"
RTARCH= ppc
.elif ${MACHINE_ARCH} == "riscv64"
RTARCH= riscv
.else
RTARCH= ${MACHINE_ARCH}
.endif
@ -301,7 +303,7 @@ SRCS+= comparetf2.c \
SRCS+= atomic_lock_free.c
.endif
.if ${RTARCH} == "riscv64"
.if ${RTARCH} == "riscv"
SRCS+= comparetf2.c \
extenddftf2.c \
extendsftf2.c \
@ -318,6 +320,8 @@ SRCS+= comparetf2.c \
floatunsitf.c \
floatuntitf.c \
multc3.c \
restore.S \
save.S \
trunctfdf2.c \
trunctfsf2.c
.endif

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ca.c,v 1.99 2024/01/24 10:09:07 tobhe Exp $ */
/* $OpenBSD: ca.c,v 1.100 2024/02/06 13:10:56 tobhe Exp $ */
/*
* Copyright (c) 2010-2013 Reyk Floeter <reyk@openbsd.org>
@ -46,7 +46,7 @@
void ca_run(struct privsep *, struct privsep_proc *, void *);
void ca_shutdown(void);
void ca_reset(struct privsep *);
void ca_reset(struct iked *);
int ca_reload(struct iked *);
int ca_cert_local(struct iked *, X509 *);
@ -175,9 +175,8 @@ ca_getkey(struct privsep *ps, struct iked_id *key, enum imsg_type type)
}
void
ca_reset(struct privsep *ps)
ca_reset(struct iked *env)
{
struct iked *env = iked_env;
struct ca_store *store = env->sc_priv;
if (store->ca_privkey.id_type == IKEV2_ID_NONE ||
@ -338,7 +337,7 @@ ca_dispatch_parent(int fd, struct privsep_proc *p, struct imsg *imsg)
memcpy(&mode, imsg->data, sizeof(mode));
if (mode == RESET_ALL || mode == RESET_CA) {
log_debug("%s: config reset", __func__);
ca_reset(&env->sc_ps);
ca_reset(env);
}
break;
case IMSG_OCSP_FD:

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ipsecctl.c,v 1.87 2024/01/29 00:59:54 yasuoka Exp $ */
/* $OpenBSD: ipsecctl.c,v 1.88 2024/02/06 05:39:28 yasuoka Exp $ */
/*
* Copyright (c) 2004, 2005 Hans-Joerg Hoexer <hshoexer@openbsd.org>
*
@ -706,11 +706,7 @@ ipsecctl_show(int opts)
}
}
/* open /etc/{services,protocols} before pledge(2) */
setservent(1);
setprotoent(1);
if (pledge("stdio", NULL) == -1)
if (pledge("stdio dns", NULL) == -1)
err(1, "pledge");
if (rbuf != NULL) {
@ -751,6 +747,9 @@ ipsecctl_show(int opts)
printf("No flows\n");
}
if (pledge("stdio", NULL) == -1)
err(1, "pledge");
if (sbuf != NULL) {
if (opts & IPSECCTL_OPT_SHOWALL)
ipsecctl_print_title("SAD:");
@ -785,10 +784,6 @@ ipsecctl_show(int opts)
ipsecctl_print_title("SAD:");
printf("No entries\n");
}
/* close /etc/{services,protocols} */
endservent();
endprotoent();
}
int

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qwx.c,v 1.17 2024/02/04 17:51:59 kettenis Exp $ */
/* $OpenBSD: qwx.c,v 1.18 2024/02/06 14:18:15 stsp Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
@ -54,6 +54,8 @@
* Driver for Qualcomm Technologies 802.11ax chipset.
*/
#include "bpfilter.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/device.h>
@ -72,6 +74,9 @@
#include <dev/ofw/openfirm.h>
#endif
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_media.h>
@ -361,6 +366,15 @@ qwx_tx(struct qwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
wh = mtod(m, struct ieee80211_frame *);
frame_type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
#if NBPFILTER > 0
if (sc->sc_drvbpf != NULL) {
struct qwx_tx_radiotap_header *tap = &sc->sc_txtap;
bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
m, BPF_DIRECTION_OUT);
}
#endif
if (frame_type == IEEE80211_FC0_TYPE_MGT)
return qwx_mac_mgmt_tx_wmi(sc, arvif, pdev_id, m);
@ -12636,6 +12650,14 @@ qwx_mgmt_rx_event(struct qwx_softc *sc, struct mbuf *m)
DNPRINTF(QWX_D_MGMT, "%s: event mgmt rx freq %d chan %d snr %d\n",
__func__, rx_ev.chan_freq, rx_ev.channel, rx_ev.snr);
#if NBPFILTER > 0
if (sc->sc_drvbpf != NULL) {
struct qwx_rx_radiotap_header *tap = &sc->sc_rxtap;
bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
m, BPF_DIRECTION_IN);
}
#endif
ieee80211_input(ifp, m, ni, &rxi);
exit:
#ifdef notyet
@ -15213,8 +15235,14 @@ qwx_dp_rx_deliver_msdu(struct qwx_softc *sc, struct qwx_rx_msdu *msdu)
wh = mtod(msdu->m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic, wh);
/* TODO: bpf */
#if NBPFILTER > 0
if (sc->sc_drvbpf != NULL) {
struct qwx_rx_radiotap_header *tap = &sc->sc_rxtap;
bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
msdu->m, BPF_DIRECTION_IN);
}
#endif
ieee80211_input(ifp, msdu->m, ni, &msdu->rxi);
ieee80211_release_node(ic, ni);
}
@ -23063,6 +23091,23 @@ qwx_run_stop(struct qwx_softc *sc)
return ENOTSUP;
}
#if NBPFILTER > 0
/*
 * Attach a BPF tap for radiotap-framed 802.11 capture and initialize
 * the driver's rx/tx radiotap header templates.  Called once from
 * qwx_attach() when the kernel is built with bpfilter support.
 */
void
qwx_radiotap_attach(struct qwx_softc *sc)
{
/* Register sc_drvbpf on the interface with the radiotap DLT. */
bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
/* Pre-fill the rx radiotap header; it_len/it_present are little-endian
 * per the radiotap spec. */
sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
/* Same for the tx direction.  NOTE(review): the IWX_ prefix on the
 * *_RADIOTAP_PRESENT macros looks inherited from the iwx(4) driver —
 * confirm whether a QWX_ prefix was intended. */
sc->sc_txtap_len = sizeof(sc->sc_txtapu);
sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
}
#endif
int
qwx_attach(struct qwx_softc *sc)
{
@ -23073,7 +23118,9 @@ qwx_attach(struct qwx_softc *sc)
task_set(&sc->init_task, qwx_init_task, sc);
task_set(&sc->newstate_task, qwx_newstate_task, sc);
timeout_set_proc(&sc->scan.timeout, qwx_scan_timeout, sc);
#if NBPFILTER > 0
qwx_radiotap_attach(sc);
#endif
for (i = 0; i < nitems(sc->pdevs); i++)
sc->pdevs[i].sc = sc;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qwxvar.h,v 1.11 2024/02/03 20:07:19 kettenis Exp $ */
/* $OpenBSD: qwxvar.h,v 1.12 2024/02/06 14:18:15 stsp Exp $ */
/*
* Copyright (c) 2018-2019 The Linux Foundation.
@ -1624,6 +1624,18 @@ struct qwx_ext_irq_grp {
#endif
};
/* Radiotap header prepended to frames handed to BPF on receive.
 * Currently carries only the mandatory ieee80211_radiotap_header;
 * no optional fields are advertised yet (present bitmap is 0). */
struct qwx_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
} __packed;
/* Present-flags bitmap for rx capture.  NOTE(review): IWX_ prefix
 * appears copied from iwx(4); confirm intended naming. */
#define IWX_RX_RADIOTAP_PRESENT 0 /* TODO add more information */
/* Radiotap header prepended to frames handed to BPF on transmit. */
struct qwx_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
} __packed;
/* Present-flags bitmap for tx capture (no optional fields yet). */
#define IWX_TX_RADIOTAP_PRESENT 0 /* TODO add more information */
struct qwx_softc {
struct device sc_dev;
struct ieee80211com sc_ic;
@ -1751,6 +1763,24 @@ struct qwx_softc {
uint32_t msi_ce_irqmask;
struct qmi_wlanfw_request_mem_ind_msg_v01 *sc_req_mem_ind;
#if NBPFILTER > 0
caddr_t sc_drvbpf;
union {
struct qwx_rx_radiotap_header th;
uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
} sc_rxtapu;
#define sc_rxtap sc_rxtapu.th
int sc_rxtap_len;
union {
struct qwx_tx_radiotap_header th;
uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
} sc_txtapu;
#define sc_txtap sc_txtapu.th
int sc_txtap_len;
#endif
};
int qwx_ce_intr(void *);

View file

@ -333,6 +333,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
{
struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
struct amdgpu_ras *con;
int r;
if (reset_device_list == NULL)
@ -358,7 +359,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
*/
amdgpu_register_gpu_instance(tmp_adev);
/* Resume RAS */
/* Resume RAS, ecc_irq */
con = amdgpu_ras_get_context(tmp_adev);
if (!amdgpu_sriov_vf(tmp_adev) && con) {
if (tmp_adev->sdma.ras &&
tmp_adev->sdma.ras->ras_block.ras_late_init) {
r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
&tmp_adev->sdma.ras->ras_block.ras_comm);
if (r) {
dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
goto end;
}
}
if (tmp_adev->gfx.ras &&
tmp_adev->gfx.ras->ras_block.ras_late_init) {
r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
&tmp_adev->gfx.ras->ras_block.ras_comm);
if (r) {
dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
goto end;
}
}
}
amdgpu_ras_resume(tmp_adev);
/* Update PSP FW topology after reset */

View file

@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
return NULL;
fence = container_of(f, struct amdgpu_amdkfd_fence, base);
if (fence && f->ops == &amdkfd_fence_ops)
if (f->ops == &amdkfd_fence_ops)
return fence;
return NULL;

View file

@ -1224,6 +1224,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
release_firmware(adev->pm.fw);
if (fw_ver < 0x00160e00)
return true;
}

View file

@ -885,21 +885,28 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
* seconds, so here, we just pick up three parts for emulation.
*/
ret = memcmp(vram_ptr, cptr, 10);
if (ret)
return ret;
if (ret) {
ret = -EIO;
goto release_buffer;
}
ret = memcmp(vram_ptr + (size / 2), cptr, 10);
if (ret)
return ret;
if (ret) {
ret = -EIO;
goto release_buffer;
}
ret = memcmp(vram_ptr + size - 10, cptr, 10);
if (ret)
return ret;
if (ret) {
ret = -EIO;
goto release_buffer;
}
release_buffer:
amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
&vram_ptr);
return 0;
return ret;
}
static ssize_t current_memory_partition_show(

View file

@ -885,6 +885,11 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
op_input.set_shader_debugger.process_context_addr = process_context_addr;
op_input.set_shader_debugger.flags.u32all = flags;
/* use amdgpu mes_flush_shader_debugger instead */
if (op_input.set_shader_debugger.flags.process_ctx_flush)
return -EINVAL;
op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
@ -904,6 +909,32 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
return r;
}
/*
 * amdgpu_mes_flush_shader_debugger - flush shader debugger state for a
 * process via the MES SET_SHADER_DEBUGGER misc op with only the
 * process_ctx_flush flag set.
 *
 * @adev: amdgpu device
 * @process_context_addr: GPU address of the process context to flush
 *
 * Returns 0 on success, -EINVAL if the MES firmware interface does not
 * provide misc_op, or the error code from the misc_op call.
 */
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr)
{
struct mes_misc_op_input op_input = {0};
int r;
if (!adev->mes.funcs->misc_op) {
DRM_ERROR("mes flush shader debugger is not supported!\n");
return -EINVAL;
}
/* Reuse the SET_SHADER_DEBUGGER op; process_ctx_flush alone requests
 * a flush (amdgpu_mes_set_shader_debugger rejects this flag). */
op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
op_input.set_shader_debugger.process_context_addr = process_context_addr;
op_input.set_shader_debugger.flags.process_ctx_flush = true;
/* misc_op must be issued under the MES lock. */
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
DRM_ERROR("failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
return r;
}
static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
struct amdgpu_ring *ring,

View file

@ -293,9 +293,10 @@ struct mes_misc_op_input {
uint64_t process_context_addr;
union {
struct {
uint64_t single_memop : 1;
uint64_t single_alu_op : 1;
uint64_t reserved: 30;
uint32_t single_memop : 1;
uint32_t single_alu_op : 1;
uint32_t reserved: 29;
uint32_t process_ctx_flush: 1;
};
uint32_t u32all;
} flags;
@ -371,7 +372,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
const uint32_t *tcp_watch_cntl,
uint32_t flags,
bool trap_en);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr);
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
int queue_type, int idx,
struct amdgpu_mes_ctx_data *ctx_data,

View file

@ -1270,19 +1270,15 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
* amdgpu_bo_move_notify - notification about a memory move
* @bo: pointer to a buffer object
* @evict: if this move is evicting the buffer from the graphics address space
* @new_mem: new information of the buffer object
*
* Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
* bookkeeping.
* TTM driver callback which is called when ttm moves a buffer.
*/
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@ -1301,13 +1297,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,

View file

@ -345,9 +345,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,

View file

@ -195,7 +195,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
/* Never sync to VM updates either. */
if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
owner != AMDGPU_FENCE_OWNER_KFD)
return false;
/* Ignore fences depending on the sync mode */

View file

@ -545,10 +545,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
out:
/* update statistics */
atomic64_add(bo->base.size, &adev->num_bytes_moved);
amdgpu_bo_move_notify(bo, evict, new_mem);
amdgpu_bo_move_notify(bo, evict);
return 0;
}
@ -1592,7 +1593,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
amdgpu_bo_move_notify(bo, false, NULL);
amdgpu_bo_move_notify(bo, false);
}
static struct ttm_device_funcs amdgpu_bo_driver = {

View file

@ -1330,9 +1330,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
if (err)
return -ENODEV;
err = amdgpu_ucode_validate(*fw);
if (err)
if (err) {
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
release_firmware(*fw);
*fw = NULL;
}
return err;
}

View file

@ -102,7 +102,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
AMD_APU_IS_RENOIR |
AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.

View file

@ -139,7 +139,9 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
AMD_APU_IS_RENOIR |
AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.

View file

@ -1141,6 +1141,10 @@ static int gmc_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
if (adev->gmc.ecc_irq.funcs &&
amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
return 0;
}

View file

@ -974,6 +974,11 @@ static int gmc_v11_0_hw_fini(void *handle)
}
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
if (adev->gmc.ecc_irq.funcs &&
amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
gmc_v11_0_gart_disable(adev);
return 0;

View file

@ -914,8 +914,8 @@ static int gmc_v6_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v6_0_hw_fini(void *handle)

View file

@ -1103,8 +1103,8 @@ static int gmc_v7_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v7_0_hw_fini(void *handle)

View file

@ -1224,8 +1224,8 @@ static int gmc_v8_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v8_0_hw_fini(void *handle)

View file

@ -2380,8 +2380,8 @@ static int gmc_v9_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
/**
@ -2420,6 +2420,10 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
if (adev->gmc.ecc_irq.funcs &&
amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
return 0;
}

View file

@ -96,7 +96,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
AMD_APU_IS_RENOIR |
AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the

View file

@ -87,6 +87,8 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
return;
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
if (dev->kfd->shared_resources.enable_mes)
amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
pdd->already_dequeued = true;
}

View file

@ -391,14 +391,9 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
/* We're not in the eviction worker.
* Signal the fence and synchronize with any
* pending eviction work.
*/
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
cancel_work_sync(&svm_bo->eviction_work);
}
dma_fence_put(&svm_bo->eviction_fence->base);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
@ -2348,8 +2343,10 @@ retry:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
/* Pairs with mmget in svm_range_add_list_work */
mmput(mm);
/* Pairs with mmget in svm_range_add_list_work. If dropping the
* last mm refcount, schedule release work to avoid circular locking
*/
mmput_async(mm);
spin_lock(&svms->deferred_list_lock);
}
@ -2660,6 +2657,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
{
struct vm_area_struct *vma;
struct interval_tree_node *node;
struct rb_node *rb_node;
unsigned long start_limit, end_limit;
vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
@ -2679,16 +2677,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
if (node) {
end_limit = min(end_limit, node->start);
/* Last range that ends before the fault address */
node = container_of(rb_prev(&node->rb),
struct interval_tree_node, rb);
rb_node = rb_prev(&node->rb);
} else {
/* Last range must end before addr because
* there was no range after addr
*/
node = container_of(rb_last(&p->svms.objects.rb_root),
struct interval_tree_node, rb);
rb_node = rb_last(&p->svms.objects.rb_root);
}
if (node) {
if (rb_node) {
node = container_of(rb_node, struct interval_tree_node, rb);
if (node->last >= addr) {
WARN(1, "Overlap with prev node and page fault addr\n");
return -EFAULT;
@ -3424,13 +3421,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
if (!fence)
return -EINVAL;
if (dma_fence_is_signaled(&fence->base))
return 0;
if (fence->svm_bo) {
/* Dereferencing fence->svm_bo is safe here because the fence hasn't
* signaled yet and we're under the protection of the fence->lock.
* After the fence is signaled in svm_range_bo_release, we cannot get
* here any more.
*
* Reference is dropped in svm_range_evict_svm_bo_worker.
*/
if (svm_bo_ref_unless_zero(fence->svm_bo)) {
WRITE_ONCE(fence->svm_bo->evicting, 1);
schedule_work(&fence->svm_bo->eviction_work);
}
@ -3445,8 +3443,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
int r = 0;
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
mm = svm_bo->eviction_fence->mm;

View file

@ -1452,17 +1452,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
/* CPU->CPU link*/
cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
if (cpu_dev) {
list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
if (iolink3->node_to == iolink2->node_to)
break;
list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
if (iolink3->node_to != iolink2->node_to)
continue;
props->weight += iolink3->weight;
props->min_latency += iolink3->min_latency;
props->max_latency += iolink3->max_latency;
props->min_bandwidth = min(props->min_bandwidth,
iolink3->min_bandwidth);
props->max_bandwidth = min(props->max_bandwidth,
iolink3->max_bandwidth);
props->weight += iolink3->weight;
props->min_latency += iolink3->min_latency;
props->max_latency += iolink3->max_latency;
props->min_bandwidth = min(props->min_bandwidth,
iolink3->min_bandwidth);
props->max_bandwidth = min(props->max_bandwidth,
iolink3->max_bandwidth);
break;
}
} else {
WARN(1, "CPU node not found");
}

View file

@ -65,7 +65,6 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@ -1248,7 +1247,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
/* AGP aperture is disabled */
if (agp_bot == agp_top) {
logical_addr_low = adev->gmc.fb_start >> 18;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
AMD_APU_IS_RENOIR |
AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@ -1260,7 +1261,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
logical_addr_high = adev->gmc.fb_end >> 18;
} else {
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
AMD_APU_IS_RENOIR |
AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@ -4342,7 +4345,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@ -4452,20 +4454,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
replay_feature_enabled = true;
break;
default:
replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
break;
}
}
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@ -4514,12 +4502,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
/*
* Disable psr if replay can be enabled
*/
if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
psr_feature_enabled = false;
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);

View file

@ -29,7 +29,6 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@ -124,12 +123,7 @@ static void vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
/*
* Prioritize replay, instead of psr
*/
if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
amdgpu_dm_replay_enable(vblank_work->stream, false);
else if (vblank_work->enable) {
if (vblank_work->enable) {
if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
@ -138,7 +132,6 @@ static void vblank_control_worker(struct work_struct *work)
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
vblank_work->stream->link->panel_config.psr.disallow_replay &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);
}

View file

@ -1948,6 +1948,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
wait_for_no_pipes_pending(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
* workaround applied during clock update
*/
dc_trigger_sync(dc, context);
}
if (dc->hwss.update_dsc_pg)

View file

@ -244,7 +244,7 @@ enum pixel_format {
#define DC_MAX_DIRTY_RECTS 3
struct dc_flip_addrs {
struct dc_plane_address address;
unsigned int flip_timestamp_in_us;
unsigned long long flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
bool triplebuffer_flips;

View file

@ -2124,7 +2124,8 @@ static void dce110_reset_hw_ctx_wrap(
BREAK_TO_DEBUGGER();
}
pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (dc_is_hdmi_tmds_signal(pipe_ctx_old->stream->signal))
pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);

View file

@ -1054,7 +1054,8 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
for (i = 0; i < dc->res_pool->pipe_count; i++)

View file

@ -1792,6 +1792,8 @@ void dcn20_program_front_end_for_ctx(
int i;
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
/* Carry over GSL groups in case the context is changing. */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@ -1815,6 +1817,20 @@ void dcn20_program_front_end_for_ctx(
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
prev_hubp_count++;
if (context->res_ctx.pipe_ctx[i].plane_state)
hubp_count++;
}
if (prev_hubp_count == 0 && hubp_count > 0) {
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, true, false);
udelay(500);
}
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
@ -1962,6 +1978,10 @@ void dcn20_post_unlock_program_front_end(
}
}
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@ -2513,7 +2533,8 @@ static void dcn20_reset_back_end_for_pipe(
* the case where the same symclk is shared across multiple otg
* instances
*/
link->phy_state.symclk_ref_cnts.otg = 0;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
link->phy_state.symclk_ref_cnts.otg = 0;
if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
link_hwss->disable_link_output(link,
&pipe_ctx->link_res, pipe_ctx->stream->signal);

View file

@ -523,7 +523,8 @@ static void dcn31_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(

View file

@ -813,6 +813,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
/* Output */
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
@ -3317,6 +3319,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightCThisState[k], v->TWait,
(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
mode_lib->vba.PrefetchModePerState[i][j] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],

View file

@ -3423,6 +3423,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightC,
double TWait,
double TPreReq,
bool ExtendPrefetchIfPossible,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
@ -3892,12 +3893,32 @@ bool dml32_CalculatePrefetchSchedule(
/* Clamp to oto for bandwidth calculation */
LinesForPrefetchBandwidth = dst_y_prefetch_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
/* Clamp to equ for bandwidth calculation */
LinesForPrefetchBandwidth = dst_y_prefetch_equ;
/* For mode programming we want to extend the prefetch as much as possible
* (up to oto, or as long as we can for equ) if we're not already applying
* the 60us prefetch requirement. This is to avoid intermittent underflow
* issues during prefetch.
*
* The prefetch extension is applied under the following scenarios:
* 1. We're in prefetch mode > 0 (i.e. we don't support MCLK switch in blank)
* 2. We're using subvp or drr methods of p-state switch, in which case
* we don't care if prefetch takes up more of the blanking time
*
* Mode programming typically chooses the smallest prefetch time possible
* (i.e. highest bandwidth during prefetch) presumably to create margin between
* p-states / c-states that happen in vblank and prefetch. Therefore we only
* apply this prefetch extension when p-state in vblank is not required (UCLK
* p-states take up the most vblank time).
*/
if (ExtendPrefetchIfPossible && TPreReq == 0 && VStartup < MaxVStartup) {
MyError = true;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
/* Clamp to equ for bandwidth calculation */
LinesForPrefetchBandwidth = dst_y_prefetch_equ;
}
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;

View file

@ -747,6 +747,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightC,
double TWait,
double TPreReq,
bool ExtendPrefetchIfPossible,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,

View file

@ -1059,18 +1059,21 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
uint32_t denominator = 1;
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The 1.006 factor (margin 5300ppm + 300ppm ~ 0.6% as per spec) is not
* required when determining PBN/time slot utilization on the link between
* us and the branch, since that overhead is already accounted for in
* the get_pbn_per_slot function.
*
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
* common multiplier to render an integer PBN for all link rate/lane
* counts combinations
* calculate
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
* peak_kbps /= (8 * 1000) convert to bytes
*/
numerator = 64 * PEAK_FACTOR_X1000;
denominator = 54 * 8 * 1000 * 1000;
numerator = 64;
denominator = 54 * 8 * 1000;
kbps *= numerator;
peak_kbps = dc_fixpt_from_fraction(kbps, denominator);

View file

@ -50,6 +50,7 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
&& tmp->hpd_status
&& tmp->dpia_bw_alloc_config.bw_alloc_enabled);
}
static void reset_bw_alloc_struct(struct dc_link *link)
{
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
@ -59,6 +60,11 @@ static void reset_bw_alloc_struct(struct dc_link *link)
link->dpia_bw_alloc_config.bw_granularity = 0;
link->dpia_bw_alloc_config.response_ready = false;
}
#define BW_GRANULARITY_0 4 // 0.25 Gbps
#define BW_GRANULARITY_1 2 // 0.5 Gbps
#define BW_GRANULARITY_2 1 // 1 Gbps
static uint8_t get_bw_granularity(struct dc_link *link)
{
uint8_t bw_granularity = 0;
@ -71,16 +77,20 @@ static uint8_t get_bw_granularity(struct dc_link *link)
switch (bw_granularity & 0x3) {
case 0:
bw_granularity = 4;
bw_granularity = BW_GRANULARITY_0;
break;
case 1:
bw_granularity = BW_GRANULARITY_1;
break;
case 2:
default:
bw_granularity = 2;
bw_granularity = BW_GRANULARITY_2;
break;
}
return bw_granularity;
}
static int get_estimated_bw(struct dc_link *link)
{
uint8_t bw_estimated_bw = 0;
@ -93,31 +103,7 @@ static int get_estimated_bw(struct dc_link *link)
return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}
static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
{
if (bw_needed > 0)
*stream_allocated_bw += bw_needed;
return true;
}
static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
{
bool ret = false;
if (*stream_allocated_bw > 0) {
*stream_allocated_bw -= bw_to_dealloc;
ret = true;
} else {
//Do nothing for now
ret = true;
}
// Unplug so reset values
if (!link->hpd_status)
reset_bw_alloc_struct(link);
return ret;
}
/*
* Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
 * granularity, Driver_ID, CM_Group, & populate the BW allocation structs
@ -128,7 +114,12 @@ static void init_usb4_bw_struct(struct dc_link *link)
// Init the known values
link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.bw_granularity,
link->dpia_bw_alloc_config.estimated_bw);
}
static uint8_t get_lowest_dpia_index(struct dc_link *link)
{
const struct dc *dc_struct = link->dc;
@ -141,12 +132,15 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
if (idx > dc_struct->links[i]->link_index)
if (idx > dc_struct->links[i]->link_index) {
idx = dc_struct->links[i]->link_index;
break;
}
}
return idx;
}
/*
* Get the Max Available BW or Max Estimated BW for each Host Router
*
@ -186,6 +180,7 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
return total_bw;
}
/*
* Cleanup function for when the dpia is unplugged to reset struct
* and perform any required clean up
@ -194,42 +189,50 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
*
* return: none
*/
static bool dpia_bw_alloc_unplug(struct dc_link *link)
static void dpia_bw_alloc_unplug(struct dc_link *link)
{
if (!link)
return true;
return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
link->dpia_bw_alloc_config.sink_allocated_bw, link);
if (link) {
DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
__func__, link->link_index);
link->dpia_bw_alloc_config.sink_allocated_bw = 0;
reset_bw_alloc_struct(link);
}
}
static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
{
uint8_t requested_bw;
uint32_t temp;
// 1. Add check for this corner case #1
if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
/* Error check whether request bw greater than allocated */
if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
DC_LOG_ERROR("%s: Request bw greater than estimated bw for link(%d)\n",
__func__, link->link_index);
req_bw = link->dpia_bw_alloc_config.estimated_bw;
}
temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
requested_bw = temp / Kbps_TO_Gbps;
// Always make sure to add more to account for floating points
/* Always make sure to add more to account for floating points */
if (temp % Kbps_TO_Gbps)
++requested_bw;
// 2. Add check for this corner case #2
/* Error check whether requested and allocated are equal */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
return;
if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) {
DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
__func__, link->link_index);
}
if (core_link_write_dpcd(
link->dpia_bw_alloc_config.response_ready = false; // Reset flag
core_link_write_dpcd(
link,
REQUESTED_BW,
&requested_bw,
sizeof(uint8_t)) == DC_OK)
link->dpia_bw_alloc_config.response_ready = false; // Reset flag
sizeof(uint8_t));
}
/*
* Return the response_ready flag from dc_link struct
*
@ -241,6 +244,7 @@ static bool get_cm_response_ready_flag(struct dc_link *link)
{
return link->dpia_bw_alloc_config.response_ready;
}
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
@ -277,27 +281,27 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
DPTX_BW_ALLOCATION_MODE_CONTROL,
&response,
sizeof(uint8_t)) != DC_OK) {
DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
__func__);
DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
__func__, link->link_index);
} else {
// SUCCESS Enabled DPtx BW Allocation Mode Support
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n",
__func__);
DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
__func__, link->link_index);
ret = true;
init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
}
}
out:
return ret;
}
void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
{
int bw_needed = 0;
int estimated = 0;
int host_router_total_estimated_bw = 0;
if (!get_bw_alloc_proceed_flag((link)))
return;
@ -306,14 +310,22 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
case DPIA_BW_REQ_FAILED:
DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
/*
* Ideally, we shouldn't run into this case as we always validate available
* bandwidth and request within that limit
*/
estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
// Update the new Estimated BW value updated by CM
link->dpia_bw_alloc_config.estimated_bw =
bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
DC_LOG_ERROR("%s: BW REQ FAILURE for DP-TX Request for link(%d)\n",
__func__, link->link_index);
DC_LOG_ERROR("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
/* Update the new Estimated BW value updated by CM */
link->dpia_bw_alloc_config.estimated_bw = estimated;
/* Allocate the previously requested bandwidth */
set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
link->dpia_bw_alloc_config.response_ready = false;
/*
* If FAIL then it is either:
@ -326,68 +338,34 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
case DPIA_BW_REQ_SUCCESS:
DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
// 1. SUCCESS 1st time before any Pruning is done
// 2. SUCCESS after prev. FAIL before any Pruning is done
// 3. SUCCESS after Pruning is done but before enabling link
bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
// 1.
if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
__func__, link->link_index);
DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed);
allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link);
link->dpia_bw_alloc_config.sink_verified_bw =
link->dpia_bw_alloc_config.sink_allocated_bw;
// SUCCESS from first attempt
if (link->dpia_bw_alloc_config.sink_allocated_bw >
link->dpia_bw_alloc_config.sink_max_bw)
link->dpia_bw_alloc_config.sink_verified_bw =
link->dpia_bw_alloc_config.sink_max_bw;
}
// 3.
else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
// Find out how much do we need to de-alloc
if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed)
deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link);
else
allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
}
// 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
// => check if estimated_bw changed
link->dpia_bw_alloc_config.sink_allocated_bw = bw_needed;
link->dpia_bw_alloc_config.response_ready = true;
break;
case DPIA_EST_BW_CHANGED:
DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
// 1. If due to unplug of other sink
if (estimated == host_router_total_estimated_bw) {
// First update the estimated & max_bw fields
if (link->dpia_bw_alloc_config.estimated_bw < estimated)
link->dpia_bw_alloc_config.estimated_bw = estimated;
}
// 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
else {
// We lost estimated bw usually due to plug event of other dpia
link->dpia_bw_alloc_config.estimated_bw = estimated;
}
DC_LOG_DEBUG("%s: ESTIMATED BW CHANGED for link(%d)\n",
__func__, link->link_index);
DC_LOG_DEBUG("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
link->dpia_bw_alloc_config.estimated_bw = estimated;
break;
case DPIA_BW_ALLOC_CAPS_CHANGED:
DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
DC_LOG_ERROR("%s: BW ALLOC CAPABILITY CHANGED to Disabled for link(%d)\n",
__func__, link->link_index);
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
break;
}
@ -409,11 +387,11 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
do {
if (!(timeout > 0))
if (timeout > 0)
timeout--;
else
break;
fsleep(10 * 1000);
drm_msleep(10);
} while (!get_cm_response_ready_flag(link));
if (!timeout)
@ -428,37 +406,36 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
out:
return ret;
}
int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
int ret = 0;
bool ret = false;
uint8_t timeout = 10;
DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
__func__, link->link_index, link->hpd_status,
link->dpia_bw_alloc_config.sink_allocated_bw, req_bw);
if (!get_bw_alloc_proceed_flag(link))
goto out;
/*
* Sometimes stream uses same timing parameters as the already
* allocated max sink bw so no need to re-alloc
*/
if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) {
set_usb4_req_bw_req(link, req_bw);
do {
if (!(timeout > 0))
timeout--;
else
break;
udelay(10 * 1000);
} while (!get_cm_response_ready_flag(link));
set_usb4_req_bw_req(link, req_bw);
do {
if (timeout > 0)
timeout--;
else
break;
drm_msleep(10);
} while (!get_cm_response_ready_flag(link));
if (!timeout)
ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
}
if (timeout)
ret = true;
out:
DC_LOG_DEBUG("%s: EXIT: timeout(%d), ret(%d)\n", __func__, timeout, ret);
return ret;
}
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
{
bool ret = true;

View file

@ -59,9 +59,9 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
* @link: pointer to the dc_link struct instance
* @req_bw: Bw requested by the stream
*
* return: allocated bw else return 0
* return: true if allocated successfully
*/
int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
/*
* Handle the USB4 BW Allocation related functionality here:

View file

@ -240,7 +240,6 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
@ -251,7 +250,6 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
DC_DISABLE_REPLAY = 0x50,
DC_ENABLE_DPIA_TRACE = 0x80,
};

View file

@ -571,7 +571,8 @@ struct SET_SHADER_DEBUGGER {
struct {
uint32_t single_memop : 1; /* SQ_DEBUG.single_memop */
uint32_t single_alu_op : 1; /* SQ_DEBUG.single_alu_op */
uint32_t reserved : 30;
uint32_t reserved : 29;
uint32_t process_ctx_flush : 1;
};
uint32_t u32all;
} flags;

View file

@ -200,7 +200,7 @@ static int get_platform_power_management_table(
struct pp_hwmgr *hwmgr,
ATOM_Tonga_PPM_Table *atom_ppm_table)
{
struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);

View file

@ -3999,6 +3999,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
uint32_t sclk, mclk, activity_percent;
uint32_t offset, val_vid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct amdgpu_device *adev = hwmgr->adev;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@ -4042,7 +4043,21 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
if ((adev->asic_type != CHIP_HAWAII) &&
(adev->asic_type != CHIP_BONAIRE) &&
(adev->asic_type != CHIP_FIJI) &&
(adev->asic_type != CHIP_TONGA))
return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
else
return -EOPNOTSUPP;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
if ((adev->asic_type != CHIP_HAWAII) &&
(adev->asic_type != CHIP_BONAIRE) &&
(adev->asic_type != CHIP_FIJI) &&
(adev->asic_type != CHIP_TONGA))
return -EOPNOTSUPP;
else
return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
(VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))

View file

@ -994,7 +994,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
{
struct drm_gem_object *obj;
struct drm_memory_stats status = {};
enum drm_gem_object_status supported_status;
enum drm_gem_object_status supported_status = 0;
int id;
spin_lock(&file->table_lock);

View file

@ -570,7 +570,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
unsigned int i;
int ret;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;

View file

@ -353,7 +353,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
mipi_dsi_detach(dsi);
if (dsi->attached)
mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
@ -378,11 +379,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
int ret;
if (!ops || !ops->attach)
return -ENOSYS;
return ops->attach(dsi->host, dsi);
ret = ops->attach(dsi->host, dsi);
if (ret)
return ret;
dsi->attached = true;
return 0;
}
EXPORT_SYMBOL(mipi_dsi_attach);
@ -394,9 +402,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
if (WARN_ON(!dsi->attached))
return -EINVAL;
if (!ops || !ops->detach)
return -ENOSYS;
dsi->attached = false;
return ops->detach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_detach);

View file

@ -24,6 +24,7 @@
#define __DRM_COLOR_MGMT_H__
#include <linux/ctype.h>
#include <linux/math64.h>
#include <drm/drm_property.h>
struct drm_crtc;

View file

@ -27,6 +27,7 @@ struct mipi_dsi_device {
uint32_t channel;
uint32_t mode_flags;
#define MIPI_DSI_MODE_LPM (1 << 0)
bool attached;
};
struct mipi_dsi_msg {

View file

@ -1,4 +1,4 @@
/* $OpenBSD: radiusd_standard.c,v 1.2 2024/01/08 04:16:48 yasuoka Exp $ */
/* $OpenBSD: radiusd_standard.c,v 1.3 2024/02/06 10:53:20 yasuoka Exp $ */
/*
* Copyright (c) 2013, 2023 Internet Initiative Japan Inc.
@ -269,7 +269,7 @@ module_standard_resdeco(void *ctx, u_int q_id, const u_char *req, size_t reqlen,
RADIUS_PACKET *radres = NULL;
struct attr *attr;
TAILQ_FOREACH(attr, &module->remove_reqattrs, next) {
TAILQ_FOREACH(attr, &module->remove_resattrs, next) {
if (radres == NULL &&
(radres = radius_convert_packet(res, reslen)) == NULL) {
syslog(LOG_ERR,

View file

@ -1,4 +1,4 @@
/* $OpenBSD: application.c,v 1.41 2023/12/21 12:43:30 martijn Exp $ */
/* $OpenBSD: application.c,v 1.42 2024/02/06 12:44:27 martijn Exp $ */
/*
* Copyright (c) 2021 Martijn van Duren <martijn@openbsd.org>
@ -31,10 +31,11 @@
#include "application.h"
#include "log.h"
#include "mib.h"
#include "smi.h"
#include "snmp.h"
#include "snmpd.h"
#include "snmpe.h"
#include "mib.h"
#define OID(...) (struct ber_oid){ { __VA_ARGS__ }, \
(sizeof((uint32_t []) { __VA_ARGS__ }) / sizeof(uint32_t)) }
@ -135,7 +136,7 @@ struct snmp_target_mib {
void appl_agentcap_free(struct appl_agentcap *);
enum appl_error appl_region(struct appl_context *, uint32_t, uint8_t,
struct ber_oid *, int, int, struct appl_backend *);
struct ber_oid *, uint8_t, int, int, struct appl_backend *);
void appl_region_free(struct appl_context *, struct appl_region *);
enum appl_error appl_region_unregister_match(struct appl_context *, uint8_t,
struct ber_oid *, char *, struct appl_backend *, int);
@ -248,7 +249,7 @@ appl_addagentcaps(const char *ctxname, struct ber_oid *oid, const char *descr,
if (ctxname == NULL)
ctxname = "";
(void)smi_oid2string(oid, oidbuf, sizeof(oidbuf), 0);
mib_oid2string(oid, oidbuf, sizeof(oidbuf), snmpd_env->sc_oidfmt);
log_info("%s: Adding agent capabilities %s context(%s)",
backend->ab_name, oidbuf, ctxname);
@ -297,7 +298,7 @@ appl_removeagentcaps(const char *ctxname, struct ber_oid *oid,
if (ctxname == NULL)
ctxname = "";
(void)smi_oid2string(oid, oidbuf, sizeof(oidbuf), 0);
mib_oid2string(oid, oidbuf, sizeof(oidbuf), snmpd_env->sc_oidfmt);
log_info("%s: Removing agent capabilities %s context(%s)",
backend->ab_name, oidbuf, ctxname);
@ -449,18 +450,24 @@ appl_targetmib(struct ber_oid *oid)
enum appl_error
appl_region(struct appl_context *ctx, uint32_t timeout, uint8_t priority,
struct ber_oid *oid, int instance, int subtree,
struct ber_oid *oid, uint8_t range_subid, int instance, int subtree,
struct appl_backend *backend)
{
struct appl_region *region = NULL, *nregion;
char oidbuf[1024], regionbuf[1024], subidbuf[11];
size_t i;
size_t i, bo_n;
/* Don't use smi_oid2string, because appl_register can't use it */
oidbuf[0] = '\0';
for (i = 0; i < oid->bo_n; i++) {
if (i != 0)
strlcat(oidbuf, ".", sizeof(oidbuf));
bo_n = oid->bo_n;
if (range_subid != 0)
oid->bo_n = range_subid;
mib_oid2string(oid, oidbuf, sizeof(oidbuf), snmpd_env->sc_oidfmt);
if (range_subid != 0) {
oid->bo_n = bo_n;
i = range_subid + 1;
} else
i = oid->bo_n;
for (; i < oid->bo_n; i++) {
strlcat(oidbuf, ".", sizeof(oidbuf));
snprintf(subidbuf, sizeof(subidbuf), "%"PRIu32,
oid->bo_id[i]);
strlcat(oidbuf, subidbuf, sizeof(oidbuf));
@ -539,15 +546,21 @@ appl_register(const char *ctxname, uint32_t timeout, uint8_t priority,
struct appl_region *region, search;
char oidbuf[1024], subidbuf[11];
enum appl_error error;
size_t i;
size_t i, bo_n;
uint32_t lower_bound;
oidbuf[0] = '\0';
/* smi_oid2string can't do ranges */
for (i = 0; i < oid->bo_n; i++) {
bo_n = oid->bo_n;
if (range_subid != 0)
oid->bo_n = range_subid;
mib_oid2string(oid, oidbuf, sizeof(oidbuf), snmpd_env->sc_oidfmt);
if (range_subid != 0) {
oid->bo_n = bo_n;
i = range_subid + 1;
} else
i = oid->bo_n;
for (; i < oid->bo_n; i++) {
strlcat(oidbuf, ".", sizeof(oidbuf));
snprintf(subidbuf, sizeof(subidbuf), "%"PRIu32, oid->bo_id[i]);
if (i != 0)
strlcat(oidbuf, ".", sizeof(oidbuf));
if (range_subid == i + 1) {
strlcat(oidbuf, "[", sizeof(oidbuf));
strlcat(oidbuf, subidbuf, sizeof(oidbuf));
@ -587,8 +600,8 @@ appl_register(const char *ctxname, uint32_t timeout, uint8_t priority,
}
if (range_subid == 0)
return appl_region(ctx, timeout, priority, oid, instance,
subtree, backend);
return appl_region(ctx, timeout, priority, oid, range_subid,
instance, subtree, backend);
range_subid--;
if (range_subid >= oid->bo_n) {
@ -604,12 +617,13 @@ appl_register(const char *ctxname, uint32_t timeout, uint8_t priority,
lower_bound = oid->bo_id[range_subid];
do {
if ((error = appl_region(ctx, timeout, priority, oid, instance,
subtree, backend)) != APPL_ERROR_NOERROR)
if ((error = appl_region(ctx, timeout, priority, oid,
range_subid, instance, subtree,
backend)) != APPL_ERROR_NOERROR)
goto fail;
} while (oid->bo_id[range_subid]++ != upper_bound);
if ((error = appl_region(ctx, timeout, priority, oid, instance, subtree,
backend)) != APPL_ERROR_NOERROR)
if ((error = appl_region(ctx, timeout, priority, oid, range_subid,
instance, subtree, backend)) != APPL_ERROR_NOERROR)
goto fail;
return APPL_ERROR_NOERROR;
@ -1311,8 +1325,8 @@ appl_response(struct appl_backend *backend, int32_t requestid,
for (i = 1; vb != NULL; vb = vb->av_next, i++) {
if (!appl_varbind_valid(vb, origvb, next,
error != APPL_ERROR_NOERROR, backend->ab_range, &errstr)) {
smi_oid2string(&(vb->av_oid), oidbuf,
sizeof(oidbuf), 0);
mib_oid2string(&(vb->av_oid), oidbuf, sizeof(oidbuf),
snmpd_env->sc_oidfmt);
log_warnx("%s: %"PRIu32" %s: %s",
backend->ab_name, requestid, oidbuf, errstr);
invalid = 1;
@ -1756,15 +1770,16 @@ appl_pdu_log(struct appl_backend *backend, enum snmp_pdutype pdutype,
buf[0] = '\0';
for (vb = vblist; vb != NULL; vb = vb->av_next) {
strlcat(buf, "{", sizeof(buf));
strlcat(buf, smi_oid2string(&(vb->av_oid), oidbuf,
sizeof(oidbuf), 0), sizeof(buf));
strlcat(buf, mib_oid2string(&(vb->av_oid), oidbuf,
sizeof(oidbuf), snmpd_env->sc_oidfmt), sizeof(buf));
if (next) {
if (vb->av_include)
strlcat(buf, "(incl)", sizeof(buf));
if (vb->av_oid_end.bo_n > 0) {
strlcat(buf, "-", sizeof(buf));
strlcat(buf, smi_oid2string(&(vb->av_oid_end),
oidbuf, sizeof(oidbuf), 0), sizeof(buf));
strlcat(buf, mib_oid2string(&(vb->av_oid_end),
oidbuf, sizeof(oidbuf),
snmpd_env->sc_oidfmt), sizeof(buf));
}
}
strlcat(buf, ":", sizeof(buf));

View file

@ -1,4 +1,4 @@
/* $OpenBSD: application_agentx.c,v 1.15 2023/12/21 12:43:30 martijn Exp $ */
/* $OpenBSD: application_agentx.c,v 1.16 2024/02/06 12:44:27 martijn Exp $ */
/*
* Copyright (c) 2022 Martijn van Duren <martijn@openbsd.org>
*
@ -34,6 +34,7 @@
#include "application.h"
#include "ax.h"
#include "log.h"
#include "mib.h"
#include "smi.h"
#include "snmp.h"
#include "snmpd.h"
@ -558,7 +559,7 @@ appl_agentx_open(struct appl_agentx_connection *conn, struct ax_pdu *pdu)
TAILQ_INSERT_TAIL(&(conn->conn_sessions), session, sess_conn_entry);
appl_agentx_oid2ber_oid(&(session->sess_oid), &oid);
smi_oid2string(&oid, oidbuf, sizeof(oidbuf), 0);
mib_oid2string(&oid, oidbuf, sizeof(oidbuf), snmpd_env->sc_oidfmt);
log_info("%s: %s %s: Open", session->sess_backend.ab_name, oidbuf,
session->sess_descr.aos_string);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: application_internal.c,v 1.11 2023/12/21 12:43:31 martijn Exp $ */
/* $OpenBSD: application_internal.c,v 1.12 2024/02/06 12:44:27 martijn Exp $ */
/*
* Copyright (c) 2023 Martijn van Duren <martijn@openbsd.org>
@ -243,10 +243,9 @@ appl_internal_region(struct ber_oid *oid)
* Ignore requestDenied, duplicateRegistration, and unsupportedContext
*/
if (error == APPL_ERROR_PROCESSINGERROR ||
error == APPL_ERROR_PARSEERROR) {
smi_oid2string(oid, oidbuf, sizeof(oidbuf), 0);
fatalx("internal: Failed to register %s", oidbuf);
}
error == APPL_ERROR_PARSEERROR)
fatalx("internal: Failed to register %s", mib_oid2string(oid,
oidbuf, sizeof(oidbuf), snmpd_env->sc_oidfmt));
}
void
@ -267,7 +266,8 @@ appl_internal_object(struct ber_oid *oid,
if (RB_INSERT(appl_internal_objects,
&appl_internal_objects, obj) != NULL)
fatalx("%s: %s already registered", __func__,
smi_oid2string(oid, buf, sizeof(buf), 0));
mib_oid2string(oid, buf, sizeof(buf),
snmpd_env->sc_oidfmt));
}
const char *

View file

@ -1,4 +1,4 @@
/* $OpenBSD: parse.y,v 1.86 2023/12/21 12:43:31 martijn Exp $ */
/* $OpenBSD: parse.y,v 1.89 2024/02/06 15:36:11 martijn Exp $ */
/*
* Copyright (c) 2007, 2008, 2012 Reyk Floeter <reyk@openbsd.org>
@ -97,15 +97,53 @@ struct sym {
int symset(const char *, const char *, int);
char *symget(const char *);
struct oid_sym {
char *descriptor;
char file[PATH_MAX];
int lineno;
};
struct object_sym {
struct oid_sym oid;
char *name;
int isint;
union {
int32_t intval;
char *sval;
};
};
struct trapcmd_sym {
struct oid_sym oid;
struct trapcmd *cmd;
};
struct trapaddress_sym {
struct oid_sym oid;
struct trap_address *tr;
};
struct snmpd *conf = NULL;
static int errors = 0;
static struct usmuser *user = NULL;
static struct ber_oid *smi_object;
static int mibparsed = 0;
static struct oid_sym *blocklist = NULL;
static size_t nblocklist = 0;
static struct oid_sym sysoid = {};
static struct object_sym *objects = NULL;
static size_t nobjects = 0;
static struct trapcmd_sym *trapcmds = NULL;
static size_t ntrapcmds = 0;
static struct trapaddress_sym *trapaddresses = NULL;
static size_t ntrapaddresses = 0;
static uint8_t engineid[SNMPD_MAXENGINEIDLEN];
static int32_t enginepen;
static size_t engineidlen;
int resolve_oid(struct ber_oid *, struct oid_sym *);
int resolve_oids(void);
int host(const char *, const char *, int, int,
struct sockaddr_storage *, int);
int listen_add(struct sockaddr_storage *, int, int);
@ -116,7 +154,7 @@ typedef struct {
char *string;
struct host *host;
struct timeval tv;
struct ber_oid *oid;
struct oid_sym oid;
struct agentx_master ax;
struct {
int type;
@ -145,6 +183,7 @@ typedef struct {
%token READONLY READWRITE OCTETSTRING INTEGER COMMUNITY TRAP RECEIVER
%token SECLEVEL NONE AUTH ENC USER AUTHKEY ENCKEY ERROR
%token HANDLE DEFAULT SRCADDR TCP UDP BLOCKLIST PORT
%token MIB DIRECTORY
%token <v.string> STRING
%token <v.number> NUMBER
%type <v.string> usmuser community optcommunity
@ -166,6 +205,7 @@ grammar : /* empty */
| grammar main '\n'
| grammar system '\n'
| grammar object '\n'
| grammar mib '\n'
| grammar error '\n' { file->errors++; }
;
@ -305,31 +345,31 @@ main : LISTEN ON listen_udptcp
}
| TRAP RECEIVER host
| TRAP HANDLE trapoid cmd {
struct trapcmd *cmd = $4.data;
struct trapcmd_sym *ttrapcmds;
cmd->cmd_oid = $3;
if (trapcmd_add(cmd) != 0) {
free($3);
free(cmd);
yyerror("duplicate oid");
if ((ttrapcmds = recallocarray(trapcmds, ntrapcmds,
ntrapcmds + 1, sizeof(*trapcmds))) == NULL) {
yyerror("malloc");
free($3.descriptor);
free($4.data);
YYERROR;
}
conf->sc_traphandler = 1;
trapcmds = ttrapcmds;
trapcmds[ntrapcmds].oid = $3;
trapcmds[ntrapcmds++].cmd = $4.data;
}
| BLOCKLIST oid {
struct ber_oid *blocklist;
struct oid_sym *tblocklist;
blocklist = recallocarray(conf->sc_blocklist,
conf->sc_nblocklist, conf->sc_nblocklist + 1,
sizeof(*blocklist));
if (blocklist == NULL) {
if ((tblocklist = recallocarray(blocklist, nblocklist,
nblocklist + 1, sizeof(*blocklist))) == NULL) {
yyerror("malloc");
free($2.descriptor);
YYERROR;
}
conf->sc_blocklist = blocklist;
blocklist[conf->sc_nblocklist++] = *$2;
free($2);
blocklist = tblocklist;
blocklist[nblocklist++] = $2;
}
| RTFILTER yesno {
conf->sc_rtfilter = $2;
@ -818,13 +858,12 @@ sysmib : CONTACT STRING {
free($2);
}
| OBJECTID oid {
if (conf->sc_system.sys_oid.bo_n != 0) {
if (sysoid.descriptor != NULL) {
yyerror("system oid already defined");
free($2);
free($2.descriptor);
YYERROR;
}
conf->sc_system.sys_oid = *$2;
free($2);
sysoid = $2;
}
| SERVICES NUMBER {
if (conf->sc_system.sys_services != -1) {
@ -843,24 +882,22 @@ sysmib : CONTACT STRING {
;
object : OBJECTID oid NAME STRING optwrite {
const char *error;
struct object_sym *tobjects;
smi_object = $2;
error = smi_insert($2, $4);
free($4);
if (error != NULL) {
yyerror("%s", error);
free($2);
YYERROR;
if ((tobjects = recallocarray(objects, nobjects,
nobjects + 1, sizeof(*objects))) == NULL) {
yyerror("malloc");
free($2.descriptor);
free($4);
}
} objectvalue {
free(smi_object);
}
objects = tobjects;
nobjects++;
objects[nobjects - 1].oid = $2;
objects[nobjects - 1].name = $4;
} objectvalue
;
objectvalue : INTEGER NUMBER {
const char *error;
if ($2 < INT32_MIN) {
yyerror("number too small");
YYERROR;
@ -869,22 +906,13 @@ objectvalue : INTEGER NUMBER {
yyerror("number too large");
YYERROR;
}
error = appl_internal_object_int(smi_object, $2);
if (error != NULL) {
yyerror("%s", error);
YYERROR;
}
objects[nobjects - 1].isint = 1;
objects[nobjects - 1].intval = $2;
}
| OCTETSTRING STRING {
const char *error;
error = appl_internal_object_string(smi_object, $2);
if (error != NULL) {
yyerror("%s", error);
free($2);
YYERROR;
}
objects[nobjects - 1].isint = 0;
objects[nobjects - 1].sval = $2;
}
;
@ -893,37 +921,28 @@ optwrite : READONLY { $$ = 0; }
;
oid : STRING {
struct ber_oid *oid;
if ((oid = calloc(1, sizeof(*oid))) == NULL) {
yyerror("calloc");
free($1);
YYERROR;
}
if (smi_string2oid($1, oid) == -1) {
yyerror("invalid OID: %s", $1);
free(oid);
free($1);
YYERROR;
}
free($1);
$$ = oid;
$$.descriptor = $1;
strlcpy($$.file, file->name, sizeof($$.file));
$$.lineno = file->lineno;
}
;
trapoid : oid { $$ = $1; }
| DEFAULT {
struct ber_oid *sysoid;
if ((sysoid =
calloc(1, sizeof(*sysoid))) == NULL) {
yyerror("calloc");
if (($$.descriptor = strdup("1.3")) == NULL) {
yyerror("malloc");
YYERROR;
}
ober_string2oid("1.3", sysoid);
$$ = sysoid;
strlcpy($$.file, file->name, sizeof($$.file));
$$.lineno = file->lineno;
}
;
hostoid : /* empty */ { $$ = NULL; }
hostoid : /* empty */ {
$$.descriptor = NULL;
strlcpy($$.file, file->name, sizeof($$.file));
$$.lineno = file->lineno;
}
| OBJECTID oid { $$ = $2; }
;
@ -1014,41 +1033,54 @@ srcaddr : /* empty */ { $$ = NULL; }
;
hostdef : STRING hostoid hostauth srcaddr {
struct sockaddr_storage ss;
struct sockaddr_storage ss, ssl = {};
struct trap_address *tr;
if ((tr = calloc(1, sizeof(*tr))) == NULL) {
yyerror("calloc");
YYERROR;
}
struct trapaddress_sym *ttrapaddresses;
if (host($1, SNMPTRAP_PORT, AF_UNSPEC, SOCK_DGRAM,
&ss, 1) <= 0) {
yyerror("invalid host: %s", $1);
free($1);
free($2);
free($2.descriptor);
free($3.data);
free($4);
free(tr);
YYERROR;
}
free($1);
memcpy(&(tr->ta_ss), &ss, sizeof(ss));
if ($4 != NULL) {
if (host($4, "0", ss.ss_family, SOCK_DGRAM,
&ss, 1) <= 0) {
yyerror("invalid source-address: %s",
$4);
free($2);
free($2.descriptor);
free($3.data);
free($4);
free(tr);
YYERROR;
}
free($4);
memcpy(&(tr->ta_sslocal), &ss, sizeof(ss));
}
tr->ta_oid = $2;
ttrapaddresses = reallocarray(trapaddresses,
ntrapaddresses + 1, sizeof(*trapaddresses));
if (ttrapaddresses == NULL) {
yyerror("malloc");
free($2.descriptor);
free($3.data);
YYERROR;
}
trapaddresses = ttrapaddresses;
ntrapaddresses++;
if ((tr = calloc(1, sizeof(*tr))) == NULL) {
yyerror("calloc");
free($2.descriptor);
free($3.data);
ntrapaddresses--;
YYERROR;
}
tr->ta_ss = ss;
tr->ta_sslocal = ssl;
tr->ta_version = $3.type;
if ($3.type == SNMP_V2) {
(void)strlcpy(tr->ta_community, $3.data,
@ -1058,7 +1090,9 @@ hostdef : STRING hostoid hostauth srcaddr {
tr->ta_usmusername = $3.data;
tr->ta_seclevel = $3.value;
}
TAILQ_INSERT_TAIL(&(conf->sc_trapreceivers), tr, entry);
trapaddresses[ntrapaddresses - 1].oid = $2;
trapaddresses[ntrapaddresses - 1].tr = tr;
}
;
@ -1188,6 +1222,12 @@ cmd : STRING {
}
;
mib : MIB DIRECTORY STRING {
mib_parsedir($3);
mibparsed = 1;
}
;
%%
struct keywords {
@ -1231,6 +1271,7 @@ lookup(char *s)
{ "contact", CONTACT },
{ "default", DEFAULT },
{ "description", DESCR },
{ "directory", DIRECTORY },
{ "enc", ENC },
{ "enckey", ENCKEY },
{ "engineid", ENGINEID },
@ -1245,6 +1286,7 @@ lookup(char *s)
{ "listen", LISTEN },
{ "location", LOCATION },
{ "mac", MAC },
{ "mib", MIB },
{ "mode", MODE },
{ "name", NAME },
{ "none", NONE },
@ -1630,6 +1672,105 @@ popfile(void)
return (file ? 0 : EOF);
}
int
resolve_oid(struct ber_oid *dst, struct oid_sym *src)
{
struct file f = { .name = src->file, };
const char *error;
file = &f;
yylval.lineno = src->lineno;
if ((error = mib_string2oid(src->descriptor, dst)) != NULL) {
if (smi_string2oid(src->descriptor, dst) == -1) {
yyerror("%s", error);
free(src->descriptor);
return -1;
}
yyerror("deprecated oid format");
}
free(src->descriptor);
return 0;
}
int
resolve_oids(void)
{
struct file f;
struct ber_oid oid;
const char *error;
size_t i;
conf->sc_blocklist = calloc(nblocklist, sizeof(*conf->sc_blocklist));
if (conf->sc_blocklist == NULL)
fatal("malloc");
conf->sc_nblocklist = nblocklist;
for (i = 0; i < nblocklist; i++) {
if (resolve_oid(&conf->sc_blocklist[i], &blocklist[i]) == -1)
return -1;
}
free(blocklist);
if (sysoid.descriptor != NULL) {
if (resolve_oid(&conf->sc_system.sys_oid, &sysoid) == -1)
return -1;
}
for (i = 0; i < nobjects; i++) {
if (resolve_oid(&oid, &objects[i].oid) == -1)
return -1;
file = &f;
f.name = objects[i].oid.file;
yylval.lineno = objects[i].oid.lineno;
if ((error = smi_insert(&oid, objects[i].name)) != NULL) {
yyerror("%s", error);
return -1;
}
if (objects[i].isint) {
if ((error = appl_internal_object_int(
&oid, objects[i].intval)) != NULL) {
yyerror("%s", error);
return -1;
}
} else {
if ((error = appl_internal_object_string(
&oid, objects[i].sval)) != NULL) {
yyerror("%s", error);
return -1;
}
}
free(objects[i].name);
}
free(objects);
for (i = 0; i < ntrapcmds; i++) {
if (resolve_oid(
&trapcmds[i].cmd->cmd_oid, &trapcmds[i].oid) == -1)
return -1;
f.name = trapcmds[i].oid.file;
yylval.lineno = trapcmds[i].oid.lineno;
file = &f;
if (trapcmd_add(trapcmds[i].cmd) != 0) {
yyerror("duplicate oid");
return -1;
}
}
free(trapcmds);
for (i = 0; i < ntrapaddresses; i++) {
if (resolve_oid(
&trapaddresses[i].tr->ta_oid, &trapaddresses[i].oid) == -1)
return -1;
TAILQ_INSERT_TAIL(&conf->sc_trapreceivers,
trapaddresses[i].tr, entry);
}
free(trapaddresses);
return 0;
}
struct snmpd *
parse_config(const char *filename, u_int flags)
{
@ -1651,6 +1792,10 @@ parse_config(const char *filename, u_int flags)
conf->sc_system.sys_services = -1;
conf->sc_flags = flags;
conf->sc_oidfmt =
flags & SNMPD_F_NONAMES ? MIB_OIDNUMERIC : MIB_OIDSYMBOLIC;
conf->sc_confpath = filename;
TAILQ_INIT(&conf->sc_addresses);
TAILQ_INIT(&conf->sc_agentx_masters);
@ -1670,6 +1815,20 @@ parse_config(const char *filename, u_int flags)
endservent();
if (errors) {
free(conf);
return (NULL);
}
if (!mibparsed)
mib_parsedir("/usr/share/snmp/mibs");
mib_resolve();
if (resolve_oids() == -1) {
free(conf);
return NULL;
}
if (uname(&u) == -1)
fatal("uname");
@ -1725,12 +1884,12 @@ parse_config(const char *filename, u_int flags)
if (h->flags & ADDRESS_FLAG_NOTIFY)
found = 1;
}
if (conf->sc_traphandler && !found) {
if (ntrapcmds && !found) {
log_warnx("trap handler needs at least one notify listener");
free(conf);
return (NULL);
}
if (!conf->sc_traphandler && found) {
if (!ntrapcmds && found) {
log_warnx("notify listener needs at least one trap handler");
free(conf);
return (NULL);
@ -1768,11 +1927,6 @@ parse_config(const char *filename, u_int flags)
}
}
if (errors) {
free(conf);
return (NULL);
}
return (conf);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: smi.c,v 1.39 2023/12/21 12:43:31 martijn Exp $ */
/* $OpenBSD: smi.c,v 1.40 2024/02/06 12:44:27 martijn Exp $ */
/*
* Copyright (c) 2007, 2008 Reyk Floeter <reyk@openbsd.org>
@ -470,8 +470,8 @@ smi_print_element(struct ber_element *root)
case BER_TYPE_OBJECT:
if (ober_get_oid(root, &o) == -1)
goto fail;
if (asprintf(&str, "%s", smi_oid2string(&o, strbuf,
sizeof(strbuf), 0)) == -1)
if (asprintf(&str, "%s", mib_oid2string(&o, strbuf,
sizeof(strbuf), snmpd_env->sc_oidfmt)) == -1)
goto fail;
break;
case BER_TYPE_OCTETSTRING:

View file

@ -1,4 +1,4 @@
.\" $OpenBSD: snmpd.conf.5,v 1.63 2023/04/25 13:36:01 op Exp $
.\" $OpenBSD: snmpd.conf.5,v 1.64 2024/02/06 12:39:13 martijn Exp $
.\"
.\" Copyright (c) 2007, 2008, 2012 Reyk Floeter <reyk@openbsd.org>
.\"
@ -14,7 +14,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.Dd $Mdocdate: April 25 2023 $
.Dd $Mdocdate: February 6 2024 $
.Dt SNMPD.CONF 5
.Os
.Sh NAME
@ -231,6 +231,11 @@ RFC1910 legacy format.
must be 8 bytes
.Pq or 16 characters in hexadecimal format .
.El
.It Ic mib directory Ar path
Specify which directories to recursively search for MIB files.
Multiple directories can be specified.
If no directory is specified it defaults to
.Pa /usr/share/snmp/mibs .
.It Ic read-only community Ar string
Specify the name of the read-only community.
There is no default value.

View file

@ -1,4 +1,4 @@
/* $OpenBSD: snmpd.h,v 1.117 2024/01/16 13:33:12 claudio Exp $ */
/* $OpenBSD: snmpd.h,v 1.119 2024/02/06 15:36:11 martijn Exp $ */
/*
* Copyright (c) 2007, 2008, 2012 Reyk Floeter <reyk@openbsd.org>
@ -36,6 +36,7 @@
#include <stddef.h>
#include <stdint.h>
#include "mib.h"
#include "snmp.h"
#ifndef nitems
@ -340,7 +341,7 @@ struct trap_address {
int ta_seclevel;
};
};
struct ber_oid *ta_oid;
struct ber_oid ta_oid;
TAILQ_ENTRY(trap_address) entry;
};
@ -396,6 +397,7 @@ struct snmpd {
#define SNMPD_F_VERBOSE 0x01
#define SNMPD_F_DEBUG 0x02
#define SNMPD_F_NONAMES 0x04
enum mib_oidfmt sc_oidfmt;
const char *sc_confpath;
struct addresslist sc_addresses;
@ -426,7 +428,7 @@ struct snmpd {
};
struct trapcmd {
struct ber_oid *cmd_oid;
struct ber_oid cmd_oid;
/* sideways return for intermediate lookups */
struct trapcmd *cmd_maybe;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: trap.c,v 1.41 2023/12/21 12:43:31 martijn Exp $ */
/* $OpenBSD: trap.c,v 1.43 2024/02/06 15:36:11 martijn Exp $ */
/*
* Copyright (c) 2008 Reyk Floeter <reyk@openbsd.org>
@ -54,7 +54,7 @@ trap_send(struct ber_oid *oid, struct ber_element *elm)
if (TAILQ_EMPTY(&snmpd_env->sc_trapreceivers))
return (0);
smi_oid2string(oid, ostr, sizeof(ostr), 0);
mib_oid2string(oid, ostr, sizeof(ostr), snmpd_env->sc_oidfmt);
log_debug("trap_send: oid %s", ostr);
/* Add mandatory varbind elements */
@ -67,9 +67,9 @@ trap_send(struct ber_oid *oid, struct ber_element *elm)
ober_link_elements(vblist, elm);
TAILQ_FOREACH(tr, &snmpd_env->sc_trapreceivers, entry) {
if (tr->ta_oid != NULL && tr->ta_oid->bo_n) {
if (tr->ta_oid.bo_n) {
/* The trap receiver may want only a specified MIB */
r = ober_oid_cmp(oid, tr->ta_oid);
r = ober_oid_cmp(oid, &tr->ta_oid);
if (r != 0 && r != 2)
continue;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: traphandler.c,v 1.25 2023/12/21 12:43:31 martijn Exp $ */
/* $OpenBSD: traphandler.c,v 1.27 2024/02/06 15:36:11 martijn Exp $ */
/*
* Copyright (c) 2014 Bret Stephen Lambert <blambert@openbsd.org>
@ -34,6 +34,7 @@
#include <unistd.h>
#include "log.h"
#include "mib.h"
#include "smi.h"
#include "snmp.h"
#include "snmpd.h"
@ -332,7 +333,8 @@ trapcmd_exec(struct trapcmd *cmd, struct sockaddr *sa,
if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, s) == -1) {
log_warn("could not create pipe for OID '%s'",
smi_oid2string(cmd->cmd_oid, oidbuf, sizeof(oidbuf), 0));
mib_oid2string(&cmd->cmd_oid, oidbuf, sizeof(oidbuf),
snmpd_env->sc_oidfmt));
return;
}
@ -350,13 +352,15 @@ trapcmd_exec(struct trapcmd *cmd, struct sockaddr *sa,
/* this shouldn't happen */
log_warn("could not exec trap command for OID '%s'",
smi_oid2string(cmd->cmd_oid, oidbuf, sizeof(oidbuf), 0));
mib_oid2string(&cmd->cmd_oid, oidbuf, sizeof(oidbuf),
snmpd_env->sc_oidfmt));
_exit(1);
/* NOTREACHED */
case -1:
log_warn("could not fork trap command for OID '%s'",
smi_oid2string(cmd->cmd_oid, oidbuf, sizeof(oidbuf), 0));
mib_oid2string(&cmd->cmd_oid, oidbuf, sizeof(oidbuf),
snmpd_env->sc_oidfmt));
close(s[0]);
close(s[1]);
return;
@ -423,7 +427,7 @@ trapcmd_lookup(struct ber_oid *oid)
struct trapcmd key, *res;
bzero(&key, sizeof(key));
key.cmd_oid = oid;
key.cmd_oid = *oid;
if ((res = RB_FIND(trapcmd_tree, &trapcmd_tree, &key)) == NULL)
res = key.cmd_maybe;
@ -435,7 +439,7 @@ trapcmd_cmp(struct trapcmd *cmd1, struct trapcmd *cmd2)
{
int ret;
ret = ober_oid_cmp(cmd1->cmd_oid, cmd2->cmd_oid);
ret = ober_oid_cmp(&cmd1->cmd_oid, &cmd2->cmd_oid);
switch (ret) {
case 2:
/* cmd1 is a child of cmd2 */
@ -458,6 +462,5 @@ trapcmd_free(struct trapcmd *cmd)
{
RB_REMOVE(trapcmd_tree, &trapcmd_tree, cmd);
free(cmd->cmd_argv);
free(cmd->cmd_oid);
free(cmd);
}