sync with OpenBSD -current

This commit is contained in:
purplerain 2024-03-27 04:10:08 +00:00
parent 56a087cff9
commit 0189975fb5
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
61 changed files with 1691 additions and 1177 deletions

View file

@ -3448,22 +3448,34 @@ amdgpu_init_backlight(struct amdgpu_device *adev)
struct backlight_device *bd = adev->dm.backlight_dev[0];
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct amdgpu_dm_connector *aconnector;
if (bd == NULL)
return;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
connector->connector_type != DRM_MODE_CONNECTOR_DSI)
aconnector = to_amdgpu_dm_connector(connector);
if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
continue;
if (aconnector->bl_idx == -1)
continue;
dev->registered = false;
connector->registration_state = DRM_CONNECTOR_UNREGISTERED;
connector->backlight_device = bd;
connector->backlight_property = drm_property_create_range(dev,
0, "Backlight", 0, bd->props.max_brightness);
drm_object_attach_property(&connector->base,
connector->backlight_property, bd->props.brightness);
connector->registration_state = DRM_CONNECTOR_REGISTERED;
dev->registered = true;
break;
}
drm_connector_list_iter_end(&conn_iter);
}

View file

@ -1449,8 +1449,18 @@ i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
* destroyed and will be invalid when the vma manager lock
* is released.
*/
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
if (!node->driver_private) {
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
GEM_BUG_ON(obj && obj->ops->mmap_ops);
} else {
obj = i915_gem_object_get_rcu
(container_of(node, struct drm_i915_gem_object,
base.vma_node));
GEM_BUG_ON(obj && !obj->ops->mmap_ops);
}
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
rcu_read_unlock();
@ -1464,6 +1474,9 @@ i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
}
}
if (obj->ops->mmap_ops)
uvm_obj_init(&obj->base.uobj, obj->ops->mmap_ops, 1);
return &obj->base.uobj;
}

View file

@ -114,7 +114,11 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *obj);
#ifdef __linux__
const struct vm_operations_struct *mmap_ops;
#else
const struct uvm_pagerops *mmap_ops;
#endif
const char *name; /* friendly name for debug, e.g. lockdep classes */
};

View file

@ -1067,7 +1067,8 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
ttm_bo_put(i915_gem_to_ttm(obj));
}
#ifdef notyet
#ifdef __linux__
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
struct vm_area_struct *area = vmf->vma;
@ -1219,6 +1220,187 @@ static const struct vm_operations_struct vm_ops_ttm = {
.close = ttm_vm_close,
};
#else /* !__linux__ */
/*
 * UVM pager fault handler for i915 TTM-backed GEM objects — the
 * OpenBSD counterpart of the Linux vm_fault_ttm().  Returns a
 * VM_PAGER_* code and calls uvmfault_unlockall() on every exit path.
 */
static int
vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	/* The TTM buffer object embeds the uvm_object as its first member. */
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct drm_device *dev = bo->base.dev;
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	intel_wakeref_t wakeref = 0;
	vm_fault_t ret;
	int idx;
	int write = !!(access_type & PROT_WRITE);

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
		return VM_PAGER_BAD;
	}

	/*
	 * Reserve (dma_resv-lock) the object.  On failure, translate the
	 * Linux VM_FAULT_* code into a UVM pager code and bail out.
	 */
	ret = ttm_bo_vm_reserve(bo);
	if (ret) {
		switch (ret) {
		case VM_FAULT_NOPAGE:
			ret = VM_PAGER_OK;
			break;
		case VM_FAULT_RETRY:
			ret = VM_PAGER_REFAULT;
			break;
		default:
			ret = VM_PAGER_BAD;
			break;
		}
		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
		return ret;
	}

	/* Object no longer wanted (purged/DONTNEED): refuse the fault. */
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		dma_resv_unlock(bo->base.resv);
		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
		return VM_PAGER_BAD;
	}

	/*
	 * This must be swapped out with shmem ttm_tt (pipeline-gutting).
	 * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go
	 * as far as doing a ttm_bo_move_null(), which should skip all the
	 * other junk.
	 */
	if (!bo->resource) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = true, /* should be idle already */
		};
		int err;

		GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));

		err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
		if (err) {
			dma_resv_unlock(bo->base.resv);
			uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
			return VM_PAGER_BAD;
		}
	} else if (!i915_ttm_resource_mappable(bo->resource)) {
		/*
		 * Current placement is not CPU-mappable; try to migrate the
		 * object to a placement the CPU can reach, skipping regions
		 * with no io_size that are not plain system memory.
		 */
		int err = -ENODEV;
		int i;

		for (i = 0; i < obj->mm.n_placements; i++) {
			struct intel_memory_region *mr = obj->mm.placements[i];
			unsigned int flags;

			if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
				continue;

			flags = obj->flags;
			flags &= ~I915_BO_ALLOC_GPU_ONLY;
			err = __i915_ttm_migrate(obj, mr, flags);
			if (!err)
				break;
		}

		if (err) {
			drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n",
				ERR_PTR(err));
			/* resv already unlocked here; out_rpm must not unlock again */
			dma_resv_unlock(bo->base.resv);
			ret = VM_FAULT_SIGBUS;
			goto out_rpm;
		}
	}

	/* Hold a runtime-pm wakeref while faulting in iomem-backed pages. */
	if (i915_ttm_cpu_maps_iomem(bo->resource))
		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);

	if (drm_dev_enter(dev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
		    TTM_BO_VM_NUM_PREFAULT, 1);
		drm_dev_exit(idx);
	} else {
		/*
		 * Device unplugged.  Linux would insert a dummy page; the
		 * OpenBSD port stubs this out and reports NOPAGE.
		 * NOTE(review): the STUB() before the #ifdef looks like a
		 * duplicate of the one in the #else arm — confirm against
		 * the committed tree.
		 */
		STUB();
#ifdef notyet
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
#else
		STUB();
		ret = VM_FAULT_NOPAGE;
#endif
	}

#ifdef __linux__
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto out_rpm;
#endif

	/*
	 * ttm_bo_vm_reserve() already has dma_resv_lock.
	 * userfault_count is protected by dma_resv lock and rpm wakeref.
	 */
	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
		obj->userfault_count = 1;
		spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
		spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
		GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
	}

	/*
	 * NOTE(review): bitwise '&' of the wakeref with the CONFIG timeout
	 * value mirrors upstream Linux i915; verify this is intended
	 * rather than '&&' before changing it.
	 */
	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
		    msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	i915_ttm_adjust_lru(obj);

	dma_resv_unlock(bo->base.resv);

out_rpm:
	/* Translate the Linux VM_FAULT_* result into a UVM pager code. */
	switch (ret) {
	case VM_FAULT_NOPAGE:
		ret = VM_PAGER_OK;
		break;
	case VM_FAULT_RETRY:
		ret = VM_PAGER_REFAULT;
		break;
	default:
		ret = VM_PAGER_BAD;
		break;
	}

	if (wakeref)
		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);

	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
	return ret;
}
/*
 * UVM pager "reference" hook: take an extra reference on the GEM
 * object that backs this uvm_object.
 */
static void
ttm_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	i915_gem_object_get(i915_ttm_to_gem(bo));
}
/*
 * UVM pager "detach" hook: drop a reference on the GEM object that
 * backs this uvm_object.
 */
static void
ttm_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	i915_gem_object_put(i915_ttm_to_gem(bo));
}
/*
 * UVM pager operations for i915 TTM-backed objects; exposed through
 * obj->ops->mmap_ops and handed to uvm_obj_init() at mmap time.
 */
const struct uvm_pagerops vm_ops_ttm = {
	.pgo_fault = vm_fault_ttm,		/* resolve page faults */
	.pgo_reference = ttm_vm_reference,	/* take object reference */
	.pgo_detach = ttm_vm_detach,		/* drop object reference */
};
#endif
static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
@ -1272,9 +1454,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.mmap_offset = i915_ttm_mmap_offset,
.unmap_virtual = i915_ttm_unmap_virtual,
#ifdef notyet
.mmap_ops = &vm_ops_ttm,
#endif
};
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_lock.c,v 1.72 2022/04/26 15:31:14 dv Exp $ */
/* $OpenBSD: kern_lock.c,v 1.73 2024/03/26 18:18:30 bluhm Exp $ */
/*
* Copyright (c) 2017 Visa Hankala
@ -264,15 +264,17 @@ mtx_enter(struct mutex *mtx)
spc->spc_spinning++;
while (mtx_enter_try(mtx) == 0) {
CPU_BUSY_CYCLE();
do {
CPU_BUSY_CYCLE();
#ifdef MP_LOCKDEBUG
if (--nticks == 0) {
db_printf("%s: %p lock spun out\n", __func__, mtx);
db_enter();
nticks = __mp_lock_spinout;
}
if (--nticks == 0) {
db_printf("%s: %p lock spun out\n",
__func__, mtx);
db_enter();
nticks = __mp_lock_spinout;
}
#endif
} while (mtx->mtx_owner != NULL);
}
spc->spc_spinning--;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: sys_socket.c,v 1.61 2023/04/15 13:18:28 kn Exp $ */
/* $OpenBSD: sys_socket.c,v 1.62 2024/03/26 09:46:47 mvs Exp $ */
/* $NetBSD: sys_socket.c,v 1.13 1995/08/12 23:59:09 mycroft Exp $ */
/*
@ -144,9 +144,11 @@ soo_stat(struct file *fp, struct stat *ub, struct proc *p)
memset(ub, 0, sizeof (*ub));
ub->st_mode = S_IFSOCK;
solock(so);
mtx_enter(&so->so_rcv.sb_mtx);
if ((so->so_rcv.sb_state & SS_CANTRCVMORE) == 0 ||
so->so_rcv.sb_cc != 0)
ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
mtx_leave(&so->so_rcv.sb_mtx);
if ((so->so_snd.sb_state & SS_CANTSENDMORE) == 0)
ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
ub->st_uid = so->so_euid;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket.c,v 1.321 2024/03/22 17:34:11 mvs Exp $ */
/* $OpenBSD: uipc_socket.c,v 1.322 2024/03/26 09:46:47 mvs Exp $ */
/* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
/*
@ -160,6 +160,9 @@ soalloc(const struct protosw *prp, int wait)
break;
}
break;
case AF_UNIX:
so->so_rcv.sb_flags |= SB_MTXLOCK;
break;
}
return (so);
@ -987,8 +990,11 @@ dontblock:
* Dispose of any SCM_RIGHTS message that went
* through the read path rather than recv.
*/
if (pr->pr_domain->dom_dispose)
if (pr->pr_domain->dom_dispose) {
sb_mtx_unlock(&so->so_rcv);
pr->pr_domain->dom_dispose(cm);
sb_mtx_lock(&so->so_rcv);
}
m_free(cm);
}
}
@ -1173,8 +1179,11 @@ dontblock:
}
SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
if (pr->pr_flags & PR_WANTRCVD)
if (pr->pr_flags & PR_WANTRCVD) {
sb_mtx_unlock(&so->so_rcv);
pru_rcvd(so);
sb_mtx_lock(&so->so_rcv);
}
}
if (orig_resid == uio->uio_resid && orig_resid &&
(flags & MSG_EOR) == 0 &&
@ -1233,10 +1242,10 @@ sorflush(struct socket *so)
/* with SBL_WAIT and SLB_NOINTR sblock() must not fail */
KASSERT(error == 0);
socantrcvmore(so);
mtx_enter(&sb->sb_mtx);
m = sb->sb_mb;
memset(&sb->sb_startzero, 0,
(caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
mtx_enter(&sb->sb_mtx);
sb->sb_timeo_nsecs = INFSLP;
mtx_leave(&sb->sb_mtx);
sbunlock(so, sb);
@ -1757,7 +1766,8 @@ somove(struct socket *so, int wait)
void
sorwakeup(struct socket *so)
{
soassertlocked_readonly(so);
if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
soassertlocked_readonly(so);
#ifdef SOCKET_SPLICE
if (so->so_rcv.sb_flags & SB_SPLICE) {
@ -1877,6 +1887,8 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
cnt = 1;
solock(so);
mtx_enter(&sb->sb_mtx);
switch (optname) {
case SO_SNDBUF:
case SO_RCVBUF:
@ -1898,7 +1910,10 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
sb->sb_hiwat : cnt;
break;
}
mtx_leave(&sb->sb_mtx);
sounlock(so);
break;
}
@ -2169,13 +2184,6 @@ sofilt_unlock(struct socket *so, struct sockbuf *sb)
}
}
/*
 * Assert the locking required to inspect socket buffer sb of socket
 * so: the per-buffer mutex must be held, plus the socket lock (shared
 * or exclusive, per soassertlocked_readonly()).
 */
static inline void
sofilt_assert_locked(struct socket *so, struct sockbuf *sb)
{
	MUTEX_ASSERT_LOCKED(&sb->sb_mtx);
	soassertlocked_readonly(so);
}
int
soo_kqfilter(struct file *fp, struct knote *kn)
{
@ -2218,9 +2226,14 @@ filt_soread(struct knote *kn, long hint)
struct socket *so = kn->kn_fp->f_data;
int rv = 0;
sofilt_assert_locked(so, &so->so_rcv);
MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
soassertlocked_readonly(so);
if (so->so_options & SO_ACCEPTCONN) {
if (so->so_rcv.sb_flags & SB_MTXLOCK)
soassertlocked_readonly(so);
kn->kn_data = so->so_qlen;
rv = (kn->kn_data != 0);
@ -2275,7 +2288,8 @@ filt_sowrite(struct knote *kn, long hint)
struct socket *so = kn->kn_fp->f_data;
int rv;
sofilt_assert_locked(so, &so->so_snd);
MUTEX_ASSERT_LOCKED(&so->so_snd.sb_mtx);
soassertlocked_readonly(so);
kn->kn_data = sbspace(so, &so->so_snd);
if (so->so_snd.sb_state & SS_CANTSENDMORE) {
@ -2306,7 +2320,9 @@ filt_soexcept(struct knote *kn, long hint)
struct socket *so = kn->kn_fp->f_data;
int rv = 0;
sofilt_assert_locked(so, &so->so_rcv);
MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
soassertlocked_readonly(so);
#ifdef SOCKET_SPLICE
if (isspliced(so)) {

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket2.c,v 1.144 2024/02/12 22:48:27 mvs Exp $ */
/* $OpenBSD: uipc_socket2.c,v 1.145 2024/03/26 09:46:47 mvs Exp $ */
/* $NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $ */
/*
@ -142,7 +142,9 @@ soisdisconnecting(struct socket *so)
soassertlocked(so);
so->so_state &= ~SS_ISCONNECTING;
so->so_state |= SS_ISDISCONNECTING;
mtx_enter(&so->so_rcv.sb_mtx);
so->so_rcv.sb_state |= SS_CANTRCVMORE;
mtx_leave(&so->so_rcv.sb_mtx);
so->so_snd.sb_state |= SS_CANTSENDMORE;
wakeup(&so->so_timeo);
sowwakeup(so);
@ -155,7 +157,9 @@ soisdisconnected(struct socket *so)
soassertlocked(so);
so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
so->so_state |= SS_ISDISCONNECTED;
mtx_enter(&so->so_rcv.sb_mtx);
so->so_rcv.sb_state |= SS_CANTRCVMORE;
mtx_leave(&so->so_rcv.sb_mtx);
so->so_snd.sb_state |= SS_CANTSENDMORE;
wakeup(&so->so_timeo);
sowwakeup(so);
@ -219,9 +223,10 @@ sonewconn(struct socket *head, int connstatus, int wait)
mtx_enter(&head->so_snd.sb_mtx);
so->so_snd.sb_timeo_nsecs = head->so_snd.sb_timeo_nsecs;
mtx_leave(&head->so_snd.sb_mtx);
mtx_enter(&head->so_rcv.sb_mtx);
so->so_rcv.sb_wat = head->so_rcv.sb_wat;
so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
mtx_enter(&head->so_rcv.sb_mtx);
so->so_rcv.sb_timeo_nsecs = head->so_rcv.sb_timeo_nsecs;
mtx_leave(&head->so_rcv.sb_mtx);
@ -651,16 +656,22 @@ soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
if (sbreserve(so, &so->so_snd, sndcc))
goto bad;
if (sbreserve(so, &so->so_rcv, rcvcc))
goto bad2;
so->so_snd.sb_wat = sndcc;
so->so_rcv.sb_wat = rcvcc;
if (so->so_rcv.sb_lowat == 0)
so->so_rcv.sb_lowat = 1;
if (so->so_snd.sb_lowat == 0)
so->so_snd.sb_lowat = MCLBYTES;
if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
mtx_enter(&so->so_rcv.sb_mtx);
if (sbreserve(so, &so->so_rcv, rcvcc)) {
mtx_leave(&so->so_rcv.sb_mtx);
goto bad2;
}
so->so_rcv.sb_wat = rcvcc;
if (so->so_rcv.sb_lowat == 0)
so->so_rcv.sb_lowat = 1;
mtx_leave(&so->so_rcv.sb_mtx);
return (0);
bad2:
sbrelease(so, &so->so_snd);
@ -676,8 +687,7 @@ bad:
int
sbreserve(struct socket *so, struct sockbuf *sb, u_long cc)
{
KASSERT(sb == &so->so_rcv || sb == &so->so_snd);
soassertlocked(so);
sbmtxassertlocked(so, sb);
if (cc == 0 || cc > sb_max)
return (1);
@ -818,7 +828,7 @@ sbappend(struct socket *so, struct sockbuf *sb, struct mbuf *m)
if (m == NULL)
return;
soassertlocked(so);
sbmtxassertlocked(so, sb);
SBLASTRECORDCHK(sb, "sbappend 1");
if ((n = sb->sb_lastrecord) != NULL) {
@ -899,8 +909,7 @@ sbappendrecord(struct socket *so, struct sockbuf *sb, struct mbuf *m0)
{
struct mbuf *m;
KASSERT(sb == &so->so_rcv || sb == &so->so_snd);
soassertlocked(so);
sbmtxassertlocked(so, sb);
if (m0 == NULL)
return;
@ -984,6 +993,8 @@ sbappendcontrol(struct socket *so, struct sockbuf *sb, struct mbuf *m0,
struct mbuf *m, *mlast, *n;
int eor = 0, space = 0;
sbmtxassertlocked(so, sb);
if (control == NULL)
panic("sbappendcontrol");
for (m = control; ; m = m->m_next) {
@ -1109,8 +1120,7 @@ sbdrop(struct socket *so, struct sockbuf *sb, int len)
struct mbuf *m, *mn;
struct mbuf *next;
KASSERT(sb == &so->so_rcv || sb == &so->so_snd);
soassertlocked(so);
sbmtxassertlocked(so, sb);
next = (m = sb->sb_mb) ? m->m_nextpkt : NULL;
while (len > 0) {

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_usrreq.c,v 1.202 2024/03/22 17:34:11 mvs Exp $ */
/* $OpenBSD: uipc_usrreq.c,v 1.203 2024/03/26 09:46:47 mvs Exp $ */
/* $NetBSD: uipc_usrreq.c,v 1.18 1996/02/09 19:00:50 christos Exp $ */
/*
@ -489,8 +489,10 @@ uipc_rcvd(struct socket *so)
* Adjust backpressure on sender
* and wakeup any waiting to write.
*/
mtx_enter(&so->so_rcv.sb_mtx);
so2->so_snd.sb_mbcnt = so->so_rcv.sb_mbcnt;
so2->so_snd.sb_cc = so->so_rcv.sb_cc;
mtx_leave(&so->so_rcv.sb_mtx);
sowwakeup(so2);
sounlock(so2);
}
@ -499,8 +501,9 @@ int
uipc_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
struct mbuf *control)
{
struct unpcb *unp = sotounpcb(so);
struct socket *so2;
int error = 0;
int error = 0, dowakeup = 0;
if (control) {
sounlock(so);
@ -514,21 +517,24 @@ uipc_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
error = EPIPE;
goto dispose;
}
if ((so2 = unp_solock_peer(so)) == NULL) {
if (unp->unp_conn == NULL) {
error = ENOTCONN;
goto dispose;
}
so2 = unp->unp_conn->unp_socket;
/*
* Send to paired receive port, and then raise
* send buffer counts to maintain backpressure.
* Wake up readers.
*/
mtx_enter(&so2->so_rcv.sb_mtx);
if (control) {
if (sbappendcontrol(so2, &so2->so_rcv, m, control)) {
control = NULL;
} else {
sounlock(so2);
mtx_leave(&so2->so_rcv.sb_mtx);
error = ENOBUFS;
goto dispose;
}
@ -539,9 +545,12 @@ uipc_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
so->so_snd.sb_mbcnt = so2->so_rcv.sb_mbcnt;
so->so_snd.sb_cc = so2->so_rcv.sb_cc;
if (so2->so_rcv.sb_cc > 0)
dowakeup = 1;
mtx_leave(&so2->so_rcv.sb_mtx);
if (dowakeup)
sorwakeup(so2);
sounlock(so2);
m = NULL;
dispose:
@ -563,7 +572,7 @@ uipc_dgram_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
struct unpcb *unp = sotounpcb(so);
struct socket *so2;
const struct sockaddr *from;
int error = 0;
int error = 0, dowakeup = 0;
if (control) {
sounlock(so);
@ -583,7 +592,7 @@ uipc_dgram_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
goto dispose;
}
if ((so2 = unp_solock_peer(so)) == NULL) {
if (unp->unp_conn == NULL) {
if (nam != NULL)
error = ECONNREFUSED;
else
@ -591,20 +600,24 @@ uipc_dgram_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
goto dispose;
}
so2 = unp->unp_conn->unp_socket;
if (unp->unp_addr)
from = mtod(unp->unp_addr, struct sockaddr *);
else
from = &sun_noname;
mtx_enter(&so2->so_rcv.sb_mtx);
if (sbappendaddr(so2, &so2->so_rcv, from, m, control)) {
sorwakeup(so2);
dowakeup = 1;
m = NULL;
control = NULL;
} else
error = ENOBUFS;
mtx_leave(&so2->so_rcv.sb_mtx);
if (so2 != so)
sounlock(so2);
if (dowakeup)
sorwakeup(so2);
if (nam)
unp_disconnect(unp);
@ -1390,9 +1403,9 @@ unp_gc(void *arg __unused)
if ((unp->unp_gcflags & UNP_GCDEAD) == 0)
continue;
so = unp->unp_socket;
solock(so);
mtx_enter(&so->so_rcv.sb_mtx);
unp_scan(so->so_rcv.sb_mb, unp_remove_gcrefs);
sounlock(so);
mtx_leave(&so->so_rcv.sb_mtx);
}
/*
@ -1414,9 +1427,9 @@ unp_gc(void *arg __unused)
unp->unp_gcflags &= ~UNP_GCDEAD;
so = unp->unp_socket;
solock(so);
mtx_enter(&so->so_rcv.sb_mtx);
unp_scan(so->so_rcv.sb_mb, unp_restore_gcrefs);
sounlock(so);
mtx_leave(&so->so_rcv.sb_mtx);
KASSERT(nunref > 0);
nunref--;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: fifo_vnops.c,v 1.103 2024/02/03 22:50:09 mvs Exp $ */
/* $OpenBSD: fifo_vnops.c,v 1.104 2024/03/26 09:46:47 mvs Exp $ */
/* $NetBSD: fifo_vnops.c,v 1.18 1996/03/16 23:52:42 christos Exp $ */
/*
@ -201,7 +201,9 @@ fifo_open(void *v)
if (fip->fi_writers == 1) {
solock(rso);
rso->so_state &= ~SS_ISDISCONNECTED;
mtx_enter(&rso->so_rcv.sb_mtx);
rso->so_rcv.sb_state &= ~SS_CANTRCVMORE;
mtx_leave(&rso->so_rcv.sb_mtx);
sounlock(rso);
if (fip->fi_readers > 0)
wakeup(&fip->fi_readers);
@ -518,7 +520,6 @@ filt_fiforead(struct knote *kn, long hint)
struct socket *so = kn->kn_hook;
int rv;
soassertlocked(so);
MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
kn->kn_data = so->so_rcv.sb_cc;
@ -574,7 +575,6 @@ filt_fifoexcept(struct knote *kn, long hint)
struct socket *so = kn->kn_hook;
int rv = 0;
soassertlocked(so);
MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
if (kn->kn_flags & __EV_POLL) {

View file

@ -1,4 +1,4 @@
/* $OpenBSD: rtable.c,v 1.85 2023/11/12 17:51:40 bluhm Exp $ */
/* $OpenBSD: rtable.c,v 1.86 2024/03/26 10:01:57 bluhm Exp $ */
/*
* Copyright (c) 2014-2016 Martin Pieuchot
@ -875,7 +875,7 @@ an_match(struct art_node *an, const struct sockaddr *dst, int plen)
return (0);
rt = SRPL_FIRST(&sr, &an->an_rtlist);
match = (memcmp(rt->rt_dest, dst, dst->sa_len) == 0);
match = (rt != NULL && memcmp(rt->rt_dest, dst, dst->sa_len) == 0);
SRPL_LEAVE(&sr);
return (match);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: frag6.c,v 1.87 2022/02/22 01:15:02 guenther Exp $ */
/* $OpenBSD: frag6.c,v 1.88 2024/03/26 23:48:49 bluhm Exp $ */
/* $KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $ */
/*
@ -404,8 +404,17 @@ frag6_input(struct mbuf **mp, int *offp, int proto, int af)
/* adjust offset to point where the original next header starts */
offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
pool_put(&ip6af_pool, ip6af);
next += offset - sizeof(struct ip6_hdr);
if ((u_int)next > IPV6_MAXPACKET) {
TAILQ_REMOVE(&frag6_queue, q6, ip6q_queue);
frag6_nfrags -= q6->ip6q_nfrag;
frag6_nfragpackets--;
mtx_leave(&frag6_mutex);
pool_put(&ip6q_pool, q6);
goto dropfrag;
}
ip6 = mtod(m, struct ip6_hdr *);
ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
ip6->ip6_plen = htons(next);
ip6->ip6_src = q6->ip6q_src;
ip6->ip6_dst = q6->ip6q_dst;
if (q6->ip6q_ecn == IPTOS_ECN_CE)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mutex.h,v 1.20 2024/02/03 22:50:09 mvs Exp $ */
/* $OpenBSD: mutex.h,v 1.21 2024/03/26 18:18:30 bluhm Exp $ */
/*
* Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@ -40,7 +40,7 @@
#include <sys/_lock.h>
struct mutex {
volatile void *mtx_owner;
void *volatile mtx_owner;
int mtx_wantipl;
int mtx_oldipl;
#ifdef WITNESS

View file

@ -1,4 +1,4 @@
/* $OpenBSD: socketvar.h,v 1.125 2024/03/22 17:34:11 mvs Exp $ */
/* $OpenBSD: socketvar.h,v 1.126 2024/03/26 09:46:47 mvs Exp $ */
/* $NetBSD: socketvar.h,v 1.18 1996/02/09 18:25:38 christos Exp $ */
/*-
@ -242,7 +242,10 @@ sb_notify(struct socket *so, struct sockbuf *sb)
/*
 * sbspace(): amount of free space left in socket buffer sb of socket
 * so, bounded by both the byte high-water mark and the mbuf storage
 * limit.
 *
 * Locking: buffers running with SB_MTXLOCK are protected by their own
 * sb_mtx, everything else still relies on the socket lock — assert
 * whichever discipline applies before reading the counters.  (The
 * displayed block carried a stray unconditional
 * soassertlocked_readonly() left over from before the SB_MTXLOCK
 * split; it is removed here so mutex-protected buffers are no longer
 * required to also hold the socket lock.)
 */
static inline long
sbspace(struct socket *so, struct sockbuf *sb)
{
	if (sb->sb_flags & SB_MTXLOCK)
		sbmtxassertlocked(so, sb);
	else
		soassertlocked_readonly(so);

	return lmin(sb->sb_hiwat - sb->sb_cc, sb->sb_mbmax - sb->sb_mbcnt);
}