sync with OpenBSD -current

purplerain 2024-02-04 06:16:28 +00:00
parent 7d66fd8cb0
commit 3f3212838f
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
122 changed files with 1363 additions and 8580 deletions


@ -1,4 +1,4 @@
/* $OpenBSD: cacheinfo.c,v 1.11 2022/07/12 04:46:00 jsg Exp $ */
/* $OpenBSD: cacheinfo.c,v 1.12 2024/02/03 09:53:15 jsg Exp $ */
/*
* Copyright (c) 2022 Jonathan Gray <jsg@openbsd.org>
@ -43,10 +43,7 @@ amd64_print_l1_cacheinfo(struct cpu_info *ci)
printf("%s: ", ci->ci_dev->dv_xname);
if (totalsize < 1024)
printf("%dKB ", totalsize);
else
printf("%dMB ", totalsize >> 10);
printf("%dKB ", totalsize);
printf("%db/line ", linesize);
switch (ways) {
@ -70,10 +67,7 @@ amd64_print_l1_cacheinfo(struct cpu_info *ci)
ways = (edx >> 16) & 0xff;
totalsize = (edx >> 24) & 0xff; /* KB */
if (totalsize < 1024)
printf("%dKB ", totalsize);
else
printf("%dMB ", totalsize >> 10);
printf("%dKB ", totalsize);
printf("%db/line ", linesize);
switch (ways) {


@ -1,4 +1,4 @@
/* $OpenBSD: cpu.c,v 1.177 2023/11/22 18:50:10 bluhm Exp $ */
/* $OpenBSD: cpu.c,v 1.178 2024/02/03 16:21:22 deraadt Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@ -163,6 +163,7 @@ int cpu_apmi_edx = 0; /* cpuid(0x80000007).edx */
int ecpu_ecxfeature = 0; /* cpuid(0x80000001).ecx */
int cpu_meltdown = 0;
int cpu_use_xsaves = 0;
int need_retpoline = 1; /* most systems need retpoline */
void
replacesmap(void)
@ -232,9 +233,11 @@ replacemeltdown(void)
if (ibrs == 2 || (ci->ci_feature_sefflags_edx & SEFF0EDX_IBT)) {
extern const char _jmprax, _jmpr11, _jmpr13;
extern const short _jmprax_len, _jmpr11_len, _jmpr13_len;
codepatch_replace(CPTAG_RETPOLINE_RAX, &_jmprax, _jmprax_len);
codepatch_replace(CPTAG_RETPOLINE_R11, &_jmpr11, _jmpr11_len);
codepatch_replace(CPTAG_RETPOLINE_R13, &_jmpr13, _jmpr13_len);
need_retpoline = 0;
}
if (!cpu_meltdown)


@ -1,4 +1,4 @@
/* $OpenBSD: machdep.c,v 1.289 2024/01/19 18:38:16 kettenis Exp $ */
/* $OpenBSD: machdep.c,v 1.290 2024/02/03 16:21:22 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
@ -486,6 +486,7 @@ bios_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
extern int tsc_is_invariant;
extern int amd64_has_xcrypt;
extern int need_retpoline;
const struct sysctl_bounded_args cpuctl_vars[] = {
{ CPU_LIDACTION, &lid_action, 0, 2 },
@ -494,6 +495,7 @@ const struct sysctl_bounded_args cpuctl_vars[] = {
{ CPU_CPUFEATURE, &cpu_feature, SYSCTL_INT_READONLY },
{ CPU_XCRYPT, &amd64_has_xcrypt, SYSCTL_INT_READONLY },
{ CPU_INVARIANTTSC, &tsc_is_invariant, SYSCTL_INT_READONLY },
{ CPU_RETPOLINE, &need_retpoline, SYSCTL_INT_READONLY },
};
/*


@ -1,4 +1,4 @@
/* $OpenBSD: cpu.h,v 1.160 2024/01/24 19:23:39 cheloha Exp $ */
/* $OpenBSD: cpu.h,v 1.161 2024/02/03 16:21:22 deraadt Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
@ -481,7 +481,8 @@ void mp_setperf_init(void);
#define CPU_TSCFREQ 16 /* TSC frequency */
#define CPU_INVARIANTTSC 17 /* has invariant TSC */
#define CPU_PWRACTION 18 /* action caused by power button */
#define CPU_MAXID 19 /* number of valid machdep ids */
#define CPU_RETPOLINE 19 /* cpu requires retpoline pattern */
#define CPU_MAXID 20 /* number of valid machdep ids */
#define CTL_MACHDEP_NAMES { \
{ 0, 0 }, \
@ -503,6 +504,7 @@ void mp_setperf_init(void);
{ "tscfreq", CTLTYPE_QUAD }, \
{ "invarianttsc", CTLTYPE_INT }, \
{ "pwraction", CTLTYPE_INT }, \
{ "retpoline", CTLTYPE_INT }, \
}
#endif /* !_MACHINE_CPU_H_ */
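Aside, for illustration only (not part of this commit): with CPU_RETPOLINE and the "retpoline" name wired up above, the new read-only node is reachable from userland as machdep.retpoline, either via sysctl(8) or programmatically with sysctl(3). A minimal sketch, assuming the updated <machine/cpu.h> is installed:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_RETPOLINE };
	int need_retpoline;
	size_t len = sizeof(need_retpoline);

	/* Read-only node, so newp/newlen stay NULL/0. */
	if (sysctl(mib, 2, &need_retpoline, &len, NULL, 0) == -1)
		return 1;
	printf("machdep.retpoline=%d\n", need_retpoline);
	return 0;
}

Per the cpu.c hunk earlier in this commit, 0 reports that the kernel patched its retpoline thunks into plain jumps (enhanced IBRS or IBT available); 1 means the CPU still needs the retpoline pattern.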


@ -1,4 +1,4 @@
/* $OpenBSD: acpipci.c,v 1.41 2023/09/16 23:25:16 jmatthew Exp $ */
/* $OpenBSD: acpipci.c,v 1.42 2024/02/03 10:37:25 kettenis Exp $ */
/*
* Copyright (c) 2018 Mark Kettenis
*
@ -207,6 +207,7 @@ acpipci_attach(struct device *parent, struct device *self, void *aux)
sc->sc_pc->pc_intr_v = sc;
sc->sc_pc->pc_intr_map = acpipci_intr_map;
sc->sc_pc->pc_intr_map_msi = _pci_intr_map_msi;
sc->sc_pc->pc_intr_map_msivec = _pci_intr_map_msivec;
sc->sc_pc->pc_intr_map_msix = _pci_intr_map_msix;
sc->sc_pc->pc_intr_string = acpipci_intr_string;
sc->sc_pc->pc_intr_establish = acpipci_intr_establish;
@ -629,7 +630,7 @@ acpipci_intr_establish(void *v, pci_intr_handle_t ih, int level,
if (ih.ih_type != PCI_INTX) {
struct interrupt_controller *ic = sc->sc_msi_ic;
bus_dma_segment_t seg;
uint64_t addr, data;
uint64_t addr = 0, data;
KASSERT(ic);


@ -1,4 +1,4 @@
/* $OpenBSD: agintc.c,v 1.54 2023/09/22 01:10:43 jsg Exp $ */
/* $OpenBSD: agintc.c,v 1.55 2024/02/03 10:37:25 kettenis Exp $ */
/*
* Copyright (c) 2007, 2009, 2011, 2017 Dale Rahn <drahn@dalerahn.com>
* Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
@ -1540,7 +1540,7 @@ struct agintc_msi_device {
LIST_ENTRY(agintc_msi_device) md_list;
uint32_t md_deviceid;
uint32_t md_eventid;
uint32_t md_events;
struct agintc_dmamem *md_itt;
};
@ -1949,7 +1949,15 @@ agintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
if (md == NULL)
return NULL;
eventid = md->md_eventid++;
eventid = *addr;
if (eventid > 0 && (md->md_events & (1U << eventid)))
return NULL;
for (; eventid < 32; eventid++) {
if ((md->md_events & (1U << eventid)) == 0) {
md->md_events |= (1U << eventid);
break;
}
}
if (eventid >= 32)
return NULL;
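Reading aid (illustrative helper, not commit code): the change replaces the old monotonically increasing md_eventid counter with a first-fit scan over the 32-bit md_events bitmap, honoring a specific nonzero event ID requested through *addr. Reduced to a self-contained function, the allocation behaves like this:

#include <stdint.h>

/*
 * First-fit allocation over 32 event IDs, mirroring the logic above:
 * a specifically requested nonzero ID fails if already taken,
 * otherwise the scan starts at the requested ID (0 means "any").
 */
int
alloc_eventid(uint32_t *events, uint32_t want)
{
	uint32_t id;

	if (want > 0 && (*events & (1U << want)))
		return -1;	/* requested ID already in use */
	for (id = want; id < 32; id++) {
		if ((*events & (1U << id)) == 0) {
			*events |= (1U << id);
			return id;
		}
	}
	return -1;		/* all 32 event IDs taken */
}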


@ -1,4 +1,4 @@
/* $OpenBSD: aplpcie.c,v 1.18 2023/12/28 13:32:56 kettenis Exp $ */
/* $OpenBSD: aplpcie.c,v 1.19 2024/02/03 10:37:25 kettenis Exp $ */
/*
* Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
*
@ -405,6 +405,7 @@ aplpcie_attach(struct device *parent, struct device *self, void *aux)
sc->sc_pc.pc_intr_v = sc;
sc->sc_pc.pc_intr_map = aplpcie_intr_map;
sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
sc->sc_pc.pc_intr_string = aplpcie_intr_string;
sc->sc_pc.pc_intr_establish = aplpcie_intr_establish;
@ -939,6 +940,7 @@ aplpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
if (ih.ih_type != PCI_INTX) {
uint64_t addr, data;
addr = data = 0;
cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
&data, level, ci, func, arg, name);
if (cookie == NULL)


@ -1,4 +1,4 @@
/* $OpenBSD: pci_machdep.c,v 1.5 2021/03/22 20:30:21 patrick Exp $ */
/* $OpenBSD: pci_machdep.c,v 1.6 2024/02/03 10:37:25 kettenis Exp $ */
/*
* Copyright (c) 2019 Mark Kettenis <kettenis@openbsd.org>
@ -24,16 +24,45 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
int
pci_intr_enable_msivec(struct pci_attach_args *pa, int num_vec)
{
pci_chipset_tag_t pc = pa->pa_pc;
pcitag_t tag = pa->pa_tag;
pcireg_t reg;
int mmc, mme, off;
if ((pa->pa_flags & PCI_FLAGS_MSI_ENABLED) == 0 ||
pci_get_capability(pc, tag, PCI_CAP_MSI, &off, &reg) == 0)
return 1;
mmc = ((reg & PCI_MSI_MC_MMC_MASK) >> PCI_MSI_MC_MMC_SHIFT);
if (num_vec > (1 << mmc))
return 1;
mme = ((reg & PCI_MSI_MC_MME_MASK) >> PCI_MSI_MC_MME_SHIFT);
while ((1 << mme) < num_vec)
mme++;
reg &= ~PCI_MSI_MC_MME_MASK;
reg |= (mme << PCI_MSI_MC_MME_SHIFT);
pci_conf_write(pc, tag, off, reg);
return 0;
}
void
pci_msi_enable(pci_chipset_tag_t pc, pcitag_t tag,
bus_addr_t addr, uint32_t data)
{
pcireg_t reg;
int off;
int mme, off;
if (pci_get_capability(pc, tag, PCI_CAP_MSI, &off, &reg) == 0)
panic("%s: no msi capability", __func__);
mme = ((reg & PCI_MSI_MC_MME_MASK) >> PCI_MSI_MC_MME_SHIFT);
data &= ~((1 << mme) - 1);
if (reg & PCI_MSI_MC_C64) {
pci_conf_write(pc, tag, off + PCI_MSI_MA, addr);
pci_conf_write(pc, tag, off + PCI_MSI_MAU32, addr >> 32);
@ -128,6 +157,33 @@ _pci_intr_map_msi(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
ihp->ih_pc = pa->pa_pc;
ihp->ih_tag = pa->pa_tag;
ihp->ih_intrpin = 0;
ihp->ih_type = PCI_MSI;
ihp->ih_dmat = pa->pa_dmat;
return 0;
}
int
_pci_intr_map_msivec(struct pci_attach_args *pa, int vec,
pci_intr_handle_t *ihp)
{
pci_chipset_tag_t pc = pa->pa_pc;
pcitag_t tag = pa->pa_tag;
pcireg_t reg;
int mme, off;
if ((pa->pa_flags & PCI_FLAGS_MSIVEC_ENABLED) == 0 ||
pci_get_capability(pc, tag, PCI_CAP_MSI, &off, &reg) == 0)
return -1;
mme = ((reg & PCI_MSI_MC_MME_MASK) >> PCI_MSI_MC_MME_SHIFT);
if (vec >= (1 << mme))
return -1;
ihp->ih_pc = pa->pa_pc;
ihp->ih_tag = pa->pa_tag;
ihp->ih_intrpin = vec;
ihp->ih_type = PCI_MSI;
ihp->ih_dmat = pa->pa_dmat;
@ -164,4 +220,3 @@ _pci_intr_map_msix(struct pci_attach_args *pa, int vec,
return 0;
}
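For context, a hypothetical consumer of the new interfaces (not part of this commit; sc, sc_intr and nvec are placeholder names): a driver asks once for N vectors, then maps and establishes each vector index separately. Inside attach, something along these lines:

	pci_intr_handle_t ih;
	int nvec = 4, vec;

	/* Enable multi-vector MSI first; 0 means all nvec vectors fit. */
	if (pci_intr_enable_msivec(pa, nvec) == 0) {
		for (vec = 0; vec < nvec; vec++) {
			if (pci_intr_map_msivec(pa, vec, &ih) != 0)
				break;	/* fall back to plain MSI/INTx */
			pci_intr_establish(pa->pa_pc, ih, IPL_NET,
			    sc_intr, sc, sc->sc_dev.dv_xname);
		}
	}

Note that _pci_intr_map_msivec() refuses vectors beyond the MME count actually granted and requires PCI_FLAGS_MSIVEC_ENABLED, so such a driver degrades gracefully on hosts that only support a single vector.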


@ -1,4 +1,4 @@
/* $OpenBSD: pci_machdep.h,v 1.11 2021/06/11 12:23:52 kettenis Exp $ */
/* $OpenBSD: pci_machdep.h,v 1.12 2024/02/03 10:37:26 kettenis Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@ -70,6 +70,8 @@ struct machine_pci_chipset {
pci_intr_handle_t *);
int (*pc_intr_map_msi)(struct pci_attach_args *,
pci_intr_handle_t *);
int (*pc_intr_map_msivec)(struct pci_attach_args *,
int, pci_intr_handle_t *);
int (*pc_intr_map_msix)(struct pci_attach_args *,
int, pci_intr_handle_t *);
const char *(*pc_intr_string)(void *, pci_intr_handle_t);
@ -102,6 +104,8 @@ struct machine_pci_chipset {
(*(c)->pa_pc->pc_intr_map)((c), (ihp))
#define pci_intr_map_msi(c, ihp) \
(*(c)->pa_pc->pc_intr_map_msi)((c), (ihp))
#define pci_intr_map_msivec(c, vec, ihp) \
(*(c)->pa_pc->pc_intr_map_msivec)((c), (vec), (ihp))
#define pci_intr_map_msix(c, vec, ihp) \
(*(c)->pa_pc->pc_intr_map_msix)((c), (vec), (ihp))
#define pci_intr_string(c, ih) \
@ -123,10 +127,14 @@ struct machine_pci_chipset {
void pci_mcfg_init(bus_space_tag_t, bus_addr_t, int, int, int);
pci_chipset_tag_t pci_lookup_segment(int);
int pci_intr_enable_msivec(struct pci_attach_args *, int);
void pci_msi_enable(pci_chipset_tag_t, pcitag_t, bus_addr_t, uint32_t);
void pci_msix_enable(pci_chipset_tag_t, pcitag_t, bus_space_tag_t,
int, bus_addr_t, uint32_t);
int _pci_intr_map_msi(struct pci_attach_args *, pci_intr_handle_t *);
int _pci_intr_map_msivec(struct pci_attach_args *, int,
pci_intr_handle_t *);
int _pci_intr_map_msix(struct pci_attach_args *, int, pci_intr_handle_t *);
#define __HAVE_PCI_MSIX


@ -1,4 +1,4 @@
# $OpenBSD: files,v 1.729 2024/01/01 18:25:50 kettenis Exp $
# $OpenBSD: files,v 1.730 2024/02/03 18:51:57 beck Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@ -960,11 +960,9 @@ file ufs/ffs/ffs_alloc.c ffs | mfs
file ufs/ffs/ffs_balloc.c ffs | mfs
file ufs/ffs/ffs_inode.c ffs | mfs
file ufs/ffs/ffs_subr.c ffs | mfs
file ufs/ffs/ffs_softdep_stub.c ffs | mfs
file ufs/ffs/ffs_tables.c ffs | mfs
file ufs/ffs/ffs_vfsops.c ffs | mfs
file ufs/ffs/ffs_vnops.c ffs | mfs
file ufs/ffs/ffs_softdep.c ffs_softupdates
file ufs/mfs/mfs_vfsops.c mfs
file ufs/mfs/mfs_vnops.c mfs
file ufs/ufs/ufs_bmap.c ffs | mfs | ext2fs


@ -1,4 +1,4 @@
/* $OpenBSD: db_interface.h,v 1.26 2023/07/02 19:02:27 cheloha Exp $ */
/* $OpenBSD: db_interface.h,v 1.27 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: db_interface.h,v 1.1 1996/02/05 01:57:03 christos Exp $ */
/*
@ -78,11 +78,6 @@ void nfs_node_print(void *, int, int (*)(const char *, ...));
/* uvm/uvm_swap.c */
void swap_print_all(int (*)(const char *, ...));
/* ufs/ffs/ffs_softdep.c */
struct worklist;
void worklist_print(struct worklist *, int, int (*)(const char *, ...));
void softdep_print(struct buf *, int, int (*)(const char *, ...));
/* arch/<arch>/<arch>/db_interface.c */
void db_machine_init(void);


@ -1,4 +1,4 @@
/* $OpenBSD: bcm2711_pcie.c,v 1.11 2022/04/06 18:59:28 naddy Exp $ */
/* $OpenBSD: bcm2711_pcie.c,v 1.12 2024/02/03 10:37:26 kettenis Exp $ */
/*
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
*
@ -299,6 +299,7 @@ bcmpcie_attach(struct device *parent, struct device *self, void *aux)
sc->sc_pc.pc_intr_v = sc;
sc->sc_pc.pc_intr_map = bcmpcie_intr_map;
sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
sc->sc_pc.pc_intr_string = bcmpcie_intr_string;
sc->sc_pc.pc_intr_establish = bcmpcie_intr_establish;


@ -1,4 +1,4 @@
/* $OpenBSD: dwpcie.c,v 1.50 2023/09/21 19:39:41 patrick Exp $ */
/* $OpenBSD: dwpcie.c,v 1.51 2024/02/03 10:37:26 kettenis Exp $ */
/*
* Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
*
@ -711,6 +711,7 @@ dwpcie_attach_deferred(struct device *self)
sc->sc_pc.pc_intr_v = sc;
sc->sc_pc.pc_intr_map = dwpcie_intr_map;
sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
sc->sc_pc.pc_intr_string = dwpcie_intr_string;
sc->sc_pc.pc_intr_establish = dwpcie_intr_establish;
@ -729,6 +730,8 @@ dwpcie_attach_deferred(struct device *self)
OF_getproplen(sc->sc_node, "msi-map") > 0 ||
sc->sc_msi_addr)
pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;
if (OF_getproplen(sc->sc_node, "msi-map") > 0)
pba.pba_flags |= PCI_FLAGS_MSIVEC_ENABLED;
/* XXX No working MSI on RK3588 yet. */
if (OF_is_compatible(sc->sc_node, "rockchip,rk3588-pcie"))
@ -1835,6 +1838,8 @@ dwpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
uint64_t addr, data;
if (sc->sc_msi_addr) {
if (ih.ih_type == PCI_MSI && ih.ih_intrpin > 0)
return NULL;
dm = dwpcie_msi_establish(sc, level, func, arg, name);
if (dm == NULL)
return NULL;
@ -1845,6 +1850,7 @@ dwpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
* Assume hardware passes Requester ID as
* sideband data.
*/
addr = ih.ih_intrpin;
data = pci_requester_id(ih.ih_pc, ih.ih_tag);
cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
&data, level, ci, func, arg, (void *)name);


@ -1,4 +1,4 @@
/* $OpenBSD: mvkpcie.c,v 1.13 2022/04/06 18:59:28 naddy Exp $ */
/* $OpenBSD: mvkpcie.c,v 1.14 2024/02/03 10:37:26 kettenis Exp $ */
/*
* Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2020 Patrick Wildt <patrick@blueri.se>
@ -528,6 +528,7 @@ mvkpcie_attach(struct device *parent, struct device *self, void *aux)
sc->sc_pc.pc_intr_v = sc;
sc->sc_pc.pc_intr_map = mvkpcie_intr_map;
sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
sc->sc_pc.pc_intr_string = mvkpcie_intr_string;
sc->sc_pc.pc_intr_establish = mvkpcie_intr_establish;
@ -783,7 +784,7 @@ mvkpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
KASSERT(ih.ih_type != PCI_NONE);
if (ih.ih_type != PCI_INTX) {
uint64_t addr, data;
uint64_t addr = 0, data;
/* Assume hardware passes Requester ID as sideband data. */
data = pci_requester_id(ih.ih_pc, ih.ih_tag);


@ -1,4 +1,4 @@
/* $OpenBSD: pciecam.c,v 1.4 2022/04/06 18:59:28 naddy Exp $ */
/* $OpenBSD: pciecam.c,v 1.5 2024/02/03 10:37:26 kettenis Exp $ */
/*
* Copyright (c) 2013,2017 Patrick Wildt <patrick@blueri.se>
*
@ -245,6 +245,7 @@ pciecam_attach(struct device *parent, struct device *self, void *aux)
sc->sc_pc.pc_intr_v = sc;
sc->sc_pc.pc_intr_map = pciecam_intr_map;
sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
sc->sc_pc.pc_intr_string = pciecam_intr_string;
sc->sc_pc.pc_intr_establish = pciecam_intr_establish;
@ -391,7 +392,7 @@ pciecam_intr_establish(void *self, pci_intr_handle_t ih, int level,
KASSERT(ih.ih_type != PCI_NONE);
if (ih.ih_type != PCI_INTX) {
uint64_t addr, data;
uint64_t addr = 0, data;
/* Assume hardware passes Requester ID as sideband data. */
data = pci_requester_id(ih.ih_pc, ih.ih_tag);


@ -1,4 +1,4 @@
/* $OpenBSD: rkpcie.c,v 1.17 2023/04/11 00:45:08 jsg Exp $ */
/* $OpenBSD: rkpcie.c,v 1.18 2024/02/03 10:37:26 kettenis Exp $ */
/*
* Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
*
@ -372,6 +372,7 @@ rkpcie_attach(struct device *parent, struct device *self, void *aux)
sc->sc_pc.pc_intr_v = sc;
sc->sc_pc.pc_intr_map = rkpcie_intr_map;
sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
sc->sc_pc.pc_intr_string = rkpcie_intr_string;
sc->sc_pc.pc_intr_establish = rkpcie_intr_establish;
@ -605,7 +606,7 @@ rkpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
KASSERT(ih.ih_type != PCI_NONE);
if (ih.ih_type != PCI_INTX) {
uint64_t addr, data;
uint64_t addr = 0, data;
/* Assume hardware passes Requester ID as sideband data. */
data = pci_requester_id(ih.ih_pc, ih.ih_tag);


@ -1,4 +1,4 @@
/* $OpenBSD: qwx.c,v 1.14 2024/02/02 15:44:19 stsp Exp $ */
/* $OpenBSD: qwx.c,v 1.16 2024/02/03 20:07:19 kettenis Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
@ -68,6 +68,10 @@
#include <machine/bus.h>
#include <machine/intr.h>
#ifdef __HAVE_FDT
#include <dev/ofw/openfirm.h>
#endif
#include <net/if.h>
#include <net/if_media.h>
@ -136,6 +140,8 @@ int qwx_mac_start(struct qwx_softc *);
void qwx_mac_scan_finish(struct qwx_softc *);
int qwx_mac_mgmt_tx_wmi(struct qwx_softc *, struct qwx_vif *, uint8_t,
struct mbuf *);
int qwx_dp_tx(struct qwx_softc *, struct qwx_vif *, uint8_t,
struct ieee80211_node *, struct mbuf *);
int qwx_dp_tx_send_reo_cmd(struct qwx_softc *, struct dp_rx_tid *,
enum hal_reo_cmd_type , struct ath11k_hal_reo_cmd *,
void (*func)(struct qwx_dp *, void *, enum hal_reo_cmd_status));
@ -358,9 +364,7 @@ qwx_tx(struct qwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
if (frame_type == IEEE80211_FC0_TYPE_MGT)
return qwx_mac_mgmt_tx_wmi(sc, arvif, pdev_id, m);
printf("%s: not implemented\n", __func__);
m_freem(m);
return ENOTSUP;
return qwx_dp_tx(sc, arvif, pdev_id, ni, m);
}
void
@ -1455,10 +1459,38 @@ qwx_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
return desc->u.wcn6855.mpdu_start.addr2;
}
/* Map from pdev index to hw mac index */
uint8_t
qwx_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
{
switch (pdev_idx) {
case 0:
return 0;
case 1:
return 2;
case 2:
return 1;
default:
return ATH11K_INVALID_HW_MAC_ID;
}
}
uint8_t
qwx_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
{
return pdev_idx;
}
static inline int
qwx_hw_get_mac_from_pdev_id(struct qwx_softc *sc, int pdev_idx)
{
if (sc->hw_params.hw_ops->get_hw_mac_from_pdev_id)
return sc->hw_params.hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
return 0;
}
const struct ath11k_hw_ops ipq8074_ops = {
#if notyet
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
#endif
.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = qwx_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
@ -1510,9 +1542,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
};
const struct ath11k_hw_ops ipq6018_ops = {
#if notyet
.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
#endif
.get_hw_mac_from_pdev_id = qwx_hw_ipq6018_mac_from_pdev_id,
.wmi_init_config = qwx_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
@ -1564,9 +1594,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
};
const struct ath11k_hw_ops qca6390_ops = {
#if notyet
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
#endif
.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = qwx_init_wmi_config_qca6390,
.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
@ -1618,13 +1646,11 @@ const struct ath11k_hw_ops qca6390_ops = {
};
const struct ath11k_hw_ops qcn9074_ops = {
#if notyet
.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
#endif
.get_hw_mac_from_pdev_id = qwx_hw_ipq6018_mac_from_pdev_id,
.wmi_init_config = qwx_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
#ifdef notyet
#if notyet
.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
@ -1672,13 +1698,11 @@ const struct ath11k_hw_ops qcn9074_ops = {
};
const struct ath11k_hw_ops wcn6855_ops = {
#if notyet
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
#endif
.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = qwx_init_wmi_config_qca6390,
.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
#ifdef notyet
#if notyet
.tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
@ -1726,9 +1750,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
};
const struct ath11k_hw_ops wcn6750_ops = {
#if notyet
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
#endif
.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = qwx_init_wmi_config_qca6390,
.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
@ -3052,7 +3074,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.sram_dump = {},
.tcl_ring_retry = true,
#endif
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
#ifdef notyet
.smp2p_wow_exit = false,
#endif
},
@ -3139,7 +3163,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.sram_dump = {},
.tcl_ring_retry = true,
#endif
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
#ifdef notyet
.smp2p_wow_exit = false,
#endif
},
@ -3228,7 +3254,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.tcl_ring_retry = true,
#endif
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
#ifdef notyet
.smp2p_wow_exit = false,
#endif
},
@ -3316,7 +3344,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.sram_dump = {},
.tcl_ring_retry = true,
#endif
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
#ifdef notyet
.smp2p_wow_exit = false,
#endif
},
@ -3405,7 +3435,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.tcl_ring_retry = true,
#endif
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
#ifdef notyet
.smp2p_wow_exit = false,
#endif
},
@ -3493,7 +3525,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.tcl_ring_retry = true,
#endif
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
#ifdef notyet
.smp2p_wow_exit = false,
#endif
},
@ -3578,7 +3612,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.sram_dump = {},
.tcl_ring_retry = false,
#endif
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
#ifdef notyet
.smp2p_wow_exit = true,
#endif
},
@ -7815,7 +7851,15 @@ qwx_core_check_smbios(struct qwx_softc *sc)
int
qwx_core_check_dt(struct qwx_softc *sc)
{
return 0; /* TODO */
#ifdef __HAVE_FDT
if (sc->sc_node == 0)
return 0;
OF_getprop(sc->sc_node, "qcom,ath11k-calibration-variant",
sc->qmi_target.bdf_ext, sizeof(sc->qmi_target.bdf_ext) - 1);
#endif
return 0;
}
int
@ -9953,6 +9997,51 @@ qwx_dp_link_desc_cleanup(struct qwx_softc *sc,
}
}
void
qwx_dp_tx_ring_free_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
{
int i;
if (tx_ring->data == NULL)
return;
for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
struct qwx_tx_data *tx_data = &tx_ring->data[i];
if (tx_data->map) {
bus_dmamap_unload(sc->sc_dmat, tx_data->map);
bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
}
m_freem(tx_data->m);
}
free(tx_ring->data, M_DEVBUF,
sc->hw_params.tx_ring_size * sizeof(struct qwx_tx_data));
tx_ring->data = NULL;
}
int
qwx_dp_tx_ring_alloc_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
{
int i, ret;
tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
sizeof(struct qwx_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO);
if (tx_ring->data == NULL)
return ENOMEM;
for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
struct qwx_tx_data *tx_data = &tx_ring->data[i];
ret = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
BUS_DMA_NOWAIT, &tx_data->map);
if (ret)
return ret;
}
return 0;
}
int
qwx_dp_alloc(struct qwx_softc *sc)
@ -10003,8 +10092,13 @@ qwx_dp_alloc(struct qwx_softc *sc)
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
#endif
dp->tx_ring[i].tcl_data_ring_id = i;
ret = qwx_dp_tx_ring_alloc_tx_data(sc, &dp->tx_ring[i]);
if (ret)
goto fail_cmn_srng_cleanup;
dp->tx_ring[i].cur = 0;
dp->tx_ring[i].queued = 0;
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
@ -10051,6 +10145,7 @@ qwx_dp_free(struct qwx_softc *sc)
idr_destroy(&dp->tx_ring[i].txbuf_idr);
spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
#endif
qwx_dp_tx_ring_free_tx_data(sc, &dp->tx_ring[i]);
free(dp->tx_ring[i].tx_status, M_DEVBUF,
sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE);
dp->tx_ring[i].tx_status = NULL;
@ -14544,9 +14639,146 @@ qwx_dp_vdev_tx_attach(struct qwx_softc *sc, struct qwx_pdev *pdev,
qwx_dp_update_vdev_search(sc, arvif);
}
void
qwx_dp_tx_status_parse(struct qwx_softc *sc, struct hal_wbm_release_ring *desc,
struct hal_tx_status *ts)
{
ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
desc->info0);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
return;
if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
return;
ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
desc->info0);
ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
desc->info1);
ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
desc->info1);
ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
desc->info2);
if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
ts->rate_stats = desc->rate_stats.info0;
else
ts->rate_stats = 0;
}
void
qwx_dp_tx_process_htt_tx_complete(struct qwx_softc *sc, void *desc,
uint8_t mac_id, uint32_t msdu_id, struct dp_tx_ring *tx_ring)
{
printf("%s: not implemented\n", __func__);
}
void
qwx_dp_tx_complete_msdu(struct qwx_softc *sc, struct dp_tx_ring *tx_ring,
uint32_t msdu_id, struct hal_tx_status *ts)
{
struct qwx_tx_data *tx_data = &tx_ring->data[msdu_id];
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) {
/* Must not happen */
return;
}
bus_dmamap_unload(sc->sc_dmat, tx_data->map);
m_freem(tx_data->m);
tx_data->m = NULL;
/* TODO: Tx rate adjustment? */
if (tx_ring->queued > 0)
tx_ring->queued--;
}
#define QWX_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
int
qwx_dp_tx_completion_handler(struct qwx_softc *sc, int ring_id)
{
struct qwx_dp *dp = &sc->dp;
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &sc->hal.srng_list[hal_ring_id];
struct hal_tx_status ts = { 0 };
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
uint32_t *desc;
uint32_t msdu_id;
uint8_t mac_id;
#ifdef notyet
spin_lock_bh(&status_ring->lock);
#endif
qwx_hal_srng_access_begin(sc, status_ring);
while ((QWX_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
tx_ring->tx_status_tail) &&
(desc = qwx_hal_srng_dst_get_next_entry(sc, status_ring))) {
memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc,
sizeof(struct hal_wbm_release_ring));
tx_ring->tx_status_head =
QWX_TX_COMPL_NEXT(tx_ring->tx_status_head);
}
#if 0
if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
(ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
tx_ring->tx_status_tail))) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
#endif
qwx_hal_srng_access_end(sc, status_ring);
#ifdef notyet
spin_unlock_bh(&status_ring->lock);
#endif
while (QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail) !=
tx_ring->tx_status_head) {
struct hal_wbm_release_ring *tx_status;
uint32_t desc_id;
tx_ring->tx_status_tail =
QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
qwx_dp_tx_status_parse(sc, tx_status, &ts);
desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
tx_status->buf_addr_info.info1);
mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
if (mac_id >= MAX_RADIOS)
continue;
msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
if (msdu_id >= sc->hw_params.tx_ring_size)
continue;
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
qwx_dp_tx_process_htt_tx_complete(sc,
(void *)tx_status, mac_id, msdu_id, tx_ring);
continue;
}
#if 0
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
continue;
}
spin_unlock(&tx_ring->tx_idr_lock);
ar = ab->pdevs[mac_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
#endif
qwx_dp_tx_complete_msdu(sc, tx_ring, msdu_id, &ts);
}
return 0;
}
@ -21050,6 +21282,7 @@ int
qwx_peer_create(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
struct ieee80211_node *ni, struct peer_create_params *param)
{
struct ieee80211com *ic = &sc->sc_ic;
struct qwx_node *nq = (struct qwx_node *)ni;
struct ath11k_peer *peer;
int ret;
@ -21131,11 +21364,12 @@ qwx_peer_create(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
peer->pdev_id = pdev_id;
#if 0
peer->sta = sta;
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
#endif
if (ic->ic_opmode == IEEE80211_M_STA) {
arvif->ast_hash = peer->ast_hash;
arvif->ast_idx = peer->hw_peer_id;
}
#if 0
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
@ -21715,6 +21949,249 @@ peer_clean:
return ret;
}
enum hal_tcl_encap_type
qwx_dp_tx_get_encap_type(struct qwx_softc *sc)
{
if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
return HAL_TCL_ENCAP_TYPE_RAW;
#if 0
if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
return HAL_TCL_ENCAP_TYPE_ETHERNET;
#endif
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
uint8_t
qwx_dp_tx_get_tid(struct mbuf *m)
{
struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
uint16_t qos = ieee80211_get_qos(wh);
uint8_t tid = qos & IEEE80211_QOS_TID;
return tid;
}
void
qwx_hal_tx_cmd_desc_setup(struct qwx_softc *sc, void *cmd,
struct hal_tx_info *ti)
{
struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
tcl_cmd->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
ti->paddr);
tcl_cmd->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
tcl_cmd->buf_addr_info.info1 |= FIELD_PREP(
BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
tcl_cmd->info0 =
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE, ti->encrypt_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE, ti->search_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN, ti->addr_search_flags) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM, ti->meta_data_flags);
tcl_cmd->info1 = ti->flags0 |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
tcl_cmd->info2 = ti->flags1 |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
ti->dscp_tid_tbl_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX, ti->bss_ast_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM, ti->bss_ast_hash);
tcl_cmd->info4 = 0;
#ifdef notyet
if (ti->enable_mesh)
ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
#endif
}
int
qwx_dp_tx(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
struct ieee80211_node *ni, struct mbuf *m)
{
struct ieee80211com *ic = &sc->sc_ic;
struct qwx_dp *dp = &sc->dp;
struct hal_tx_info ti = {0};
struct qwx_tx_data *tx_data;
struct hal_srng *tcl_ring;
struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
struct ieee80211_key *k = NULL;
struct dp_tx_ring *tx_ring;
void *hal_tcl_desc;
uint8_t pool_id;
uint8_t hal_ring_id;
int ret, msdu_id;
uint32_t ring_selector = 0;
uint8_t ring_map = 0;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
m_freem(m);
printf("%s: crash flush\n", __func__);
return ESHUTDOWN;
}
#if 0
if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control)))
return -ENOTSUPP;
#endif
pool_id = 0;
ring_selector = 0;
ti.ring_id = ring_selector % sc->hw_params.max_tx_ring;
ti.rbm_id = sc->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= (1 << ti.ring_id);
tx_ring = &dp->tx_ring[ti.ring_id];
if (tx_ring->queued >= sc->hw_params.tx_ring_size) {
m_freem(m);
return ENOSPC;
}
msdu_id = tx_ring->cur;
tx_data = &tx_ring->data[msdu_id];
if (tx_data->m != NULL) {
m_freem(m);
return ENOSPC;
}
ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, pdev_id) |
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
ti.encap_type = qwx_dp_tx_get_encap_type(sc);
ti.meta_data_flags = arvif->tcl_metadata;
if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
#if 0
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
ti.encrypt_type =
ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
if (ieee80211_has_protected(hdr->frame_control))
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
} else
#endif
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_get_txkey(ic, wh, ni);
if ((m = ieee80211_encrypt(ic, m, k)) == NULL) {
printf("%s: encrypt failed\n", __func__);
return ENOBUFS;
}
/* 802.11 header may have moved. */
wh = mtod(m, struct ieee80211_frame *);
}
}
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = qwx_hw_get_mac_from_pdev_id(sc, pdev_id);
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
#if 0
if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
}
if (ieee80211_vif_is_mesh(arvif->vif))
ti.enable_mesh = true;
#endif
ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
ti.tid = qwx_dp_tx_get_tid(m);
#if 0
switch (ti.encap_type) {
case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
ath11k_dp_tx_encap_nwifi(skb);
break;
case HAL_TCL_ENCAP_TYPE_RAW:
if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
ret = -EINVAL;
goto fail_remove_idr;
}
break;
case HAL_TCL_ENCAP_TYPE_ETHERNET:
/* no need to encap */
break;
case HAL_TCL_ENCAP_TYPE_802_3:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
goto fail_remove_idr;
}
#endif
ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (ret) {
printf("%s: failed to map Tx buffer: %d\n",
sc->sc_dev.dv_xname, ret);
m_freem(m);
return ret;
}
ti.paddr = tx_data->map->dm_segs[0].ds_addr;
ti.data_len = m->m_pkthdr.len;
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tcl_ring = &sc->hal.srng_list[hal_ring_id];
#ifdef notyet
spin_lock_bh(&tcl_ring->lock);
#endif
qwx_hal_srng_access_begin(sc, tcl_ring);
hal_tcl_desc = (void *)qwx_hal_srng_src_get_next_entry(sc, tcl_ring);
if (!hal_tcl_desc) {
printf("%s: hal_tcl_desc == NULL\n", __func__);
/* NOTE: It is highly unlikely we'll be running out of tcl_ring
* desc because the desc is directly enqueued onto hw queue.
*/
qwx_hal_srng_access_end(sc, tcl_ring);
#if 0
ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
#endif
#ifdef notyet
spin_unlock_bh(&tcl_ring->lock);
#endif
bus_dmamap_unload(sc->sc_dmat, tx_data->map);
m_freem(m);
return ENOMEM;
}
tx_data->m = m;
qwx_hal_tx_cmd_desc_setup(sc,
hal_tcl_desc + sizeof(struct hal_tlv_hdr), &ti);
qwx_hal_srng_access_end(sc, tcl_ring);
qwx_dp_shadow_start_timer(sc, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
#ifdef notyet
spin_unlock_bh(&tcl_ring->lock);
#endif
tx_ring->queued++;
tx_ring->cur = (tx_ring->cur + 1) % sc->hw_params.tx_ring_size;
return 0;
}
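Aside (illustration only, reusing the driver's own FIELD_PREP/FIELD_GET macros and DP_TX_DESC_ID_* masks): the desc_id cookie built here is exactly what qwx_dp_tx_completion_handler() unpacks, so the completion path can find the tx slot without a lookup table:

	uint32_t desc_id;

	/* Pack at transmit time, as qwx_dp_tx() does above. */
	desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, pdev_id) |
	    FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
	    FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);

	/*
	 * Unpack on completion, as the handler does; the masks are
	 * disjoint, so both values round-trip exactly.
	 */
	mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
	msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);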
int
qwx_mac_station_add(struct qwx_softc *sc, struct qwx_vif *arvif,
uint8_t pdev_id, struct ieee80211_node *ni)


@ -1,4 +1,4 @@
/* $OpenBSD: qwxreg.h,v 1.5 2024/02/02 15:44:19 stsp Exp $ */
/* $OpenBSD: qwxreg.h,v 1.6 2024/02/03 10:03:18 stsp Exp $ */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.
@ -38,6 +38,16 @@
* core.h
*/
#define ATH11K_TX_MGMT_NUM_PENDING_MAX 512
#define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
/* Pending management packets threshold for dropping probe responses */
#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
#define ATH11K_INVALID_HW_MAC_ID 0xFF
#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
enum ath11k_hw_rev {
ATH11K_HW_IPQ8074,
ATH11K_HW_QCA6390_HW20,


@ -1,4 +1,4 @@
/* $OpenBSD: qwxvar.h,v 1.9 2024/02/02 15:44:19 stsp Exp $ */
/* $OpenBSD: qwxvar.h,v 1.11 2024/02/03 20:07:19 kettenis Exp $ */
/*
* Copyright (c) 2018-2019 The Linux Foundation.
@ -113,6 +113,53 @@ struct ath11k_hw_hal_params {
const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
};
struct hal_tx_info {
uint16_t meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
uint8_t ring_id;
uint32_t desc_id;
enum hal_tcl_desc_type type;
enum hal_tcl_encap_type encap_type;
uint64_t paddr;
uint32_t data_len;
uint32_t pkt_offset;
enum hal_encrypt_type encrypt_type;
uint32_t flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
uint32_t flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
uint16_t addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
uint16_t bss_ast_hash;
uint16_t bss_ast_idx;
uint8_t tid;
uint8_t search_type; /* %HAL_TX_ADDR_SEARCH_ */
uint8_t lmac_id;
uint8_t dscp_tid_tbl_idx;
bool enable_mesh;
uint8_t rbm_id;
};
/* TODO: Check if the actual desc macros can be used instead */
#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
/* Tx status parsed from srng desc */
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
uint8_t ack_rssi;
uint32_t flags; /* %HAL_TX_STATUS_FLAGS_ */
uint32_t ppdu_id;
uint8_t try_cnt;
uint8_t tid;
uint16_t peer_id;
uint32_t rate_stats;
};
struct ath11k_hw_params {
const char *name;
uint16_t hw_rev;
@ -209,9 +256,7 @@ struct ath11k_hw_params {
};
struct ath11k_hw_ops {
#if notyet
uint8_t (*get_hw_mac_from_pdev_id)(int pdev_id);
#endif
void (*wmi_init_config)(struct qwx_softc *sc,
struct target_resource_config *config);
int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
@ -922,11 +967,9 @@ struct dp_tx_ring {
uint8_t tcl_data_ring_id;
struct dp_srng tcl_data_ring;
struct dp_srng tcl_comp_ring;
#if 0
struct idr txbuf_idr;
/* Protects txbuf_idr and num_pending */
spinlock_t tx_idr_lock;
#endif
int cur;
int queued;
struct qwx_tx_data *data;
struct hal_wbm_release_ring *tx_status;
int tx_status_head;
int tx_status_tail;
@ -1585,6 +1628,7 @@ struct qwx_softc {
struct device sc_dev;
struct ieee80211com sc_ic;
uint32_t sc_flags;
int sc_node;
int (*sc_newstate)(struct ieee80211com *, enum ieee80211_state, int);


@ -2120,8 +2120,10 @@ static struct edid *edid_filter_invalid_blocks(struct edid *edid,
kfree(edid);
#else
new = kmalloc(*alloc_size, GFP_KERNEL);
if (!new)
if (!new) {
kfree(edid);
return NULL;
}
memcpy(new, edid, EDID_LENGTH);
kfree(edid);
#endif


@ -1,4 +1,4 @@
/* $OpenBSD: if_qwx_pci.c,v 1.4 2024/01/25 17:00:21 stsp Exp $ */
/* $OpenBSD: if_qwx_pci.c,v 1.5 2024/02/03 20:07:19 kettenis Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
@ -770,6 +770,10 @@ qwx_pci_attach(struct device *parent, struct device *self, void *aux)
psc->sc_pc = pa->pa_pc;
psc->sc_tag = pa->pa_tag;
#ifdef __HAVE_FDT
sc->sc_node = PCITAG_NODE(pa->pa_tag);
#endif
rw_init(&sc->ioctl_rwl, "qwxioctl");
sreg = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_SUBSYS_ID_REG);


@ -1,4 +1,4 @@
/* $OpenBSD: pcivar.h,v 1.78 2023/04/13 15:07:43 miod Exp $ */
/* $OpenBSD: pcivar.h,v 1.79 2024/02/03 10:37:26 kettenis Exp $ */
/* $NetBSD: pcivar.h,v 1.23 1997/06/06 23:48:05 thorpej Exp $ */
/*
@ -161,13 +161,15 @@ struct pci_attach_args {
*
* OpenBSD doesn't actually use them yet -- csapuntz@cvs.openbsd.org
*/
#define PCI_FLAGS_IO_ENABLED 0x01 /* I/O space is enabled */
#define PCI_FLAGS_MEM_ENABLED 0x02 /* memory space is enabled */
#define PCI_FLAGS_MRL_OKAY 0x04 /* Memory Read Line okay */
#define PCI_FLAGS_MRM_OKAY 0x08 /* Memory Read Multiple okay */
#define PCI_FLAGS_MWI_OKAY 0x10 /* Memory Write and Invalidate
#define PCI_FLAGS_IO_ENABLED 0x01 /* I/O space is enabled */
#define PCI_FLAGS_MEM_ENABLED 0x02 /* memory space is enabled */
#define PCI_FLAGS_MRL_OKAY 0x04 /* Memory Read Line okay */
#define PCI_FLAGS_MRM_OKAY 0x08 /* Memory Read Multiple okay */
#define PCI_FLAGS_MWI_OKAY 0x10 /* Memory Write and Invalidate
okay */
#define PCI_FLAGS_MSI_ENABLED 0x20 /* Message Signaled Interrupt
#define PCI_FLAGS_MSI_ENABLED 0x20 /* Message Signaled Interrupt
enabled */
#define PCI_FLAGS_MSIVEC_ENABLED 0x40 /* Multiple Message Capability
enabled */
/*


@ -1,4 +1,4 @@
/* $OpenBSD: softraid.c,v 1.429 2022/12/21 09:54:23 kn Exp $ */
/* $OpenBSD: softraid.c,v 1.430 2024/02/03 18:51:58 beck Exp $ */
/*
* Copyright (c) 2007, 2008, 2009 Marco Peereboom <marco@peereboom.us>
* Copyright (c) 2008 Chris Kuethe <ckuethe@openbsd.org>
@ -445,7 +445,6 @@ sr_rw(struct sr_softc *sc, dev_t dev, char *buf, size_t size, daddr_t blkno,
splx(s);
}
LIST_INIT(&b.b_dep);
VOP_STRATEGY(vp, &b);
biowait(&b);
@ -2018,8 +2017,6 @@ sr_ccb_rw(struct sr_discipline *sd, int chunk, daddr_t blkno,
splx(s);
}
LIST_INIT(&ccb->ccb_buf.b_dep);
DNPRINTF(SR_D_DIS, "%s: %s %s ccb "
"b_bcount %ld b_blkno %lld b_flags 0x%0lx b_data %p\n",
DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, sd->sd_name,


@ -1,4 +1,4 @@
/* $OpenBSD: kern_physio.c,v 1.48 2023/11/24 00:15:42 asou Exp $ */
/* $OpenBSD: kern_physio.c,v 1.49 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: kern_physio.c,v 1.28 1997/05/19 10:43:28 pk Exp $ */
/*-
@ -84,7 +84,6 @@ physio(void (*strategy)(struct buf *), dev_t dev, int flags,
bp->b_error = 0;
bp->b_proc = p;
bp->b_flags = B_BUSY;
LIST_INIT(&bp->b_dep);
splx(s);
/*


@ -1,4 +1,4 @@
/* $OpenBSD: spec_vnops.c,v 1.111 2022/12/05 23:18:37 deraadt Exp $ */
/* $OpenBSD: spec_vnops.c,v 1.112 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $ */
/*
@ -444,9 +444,6 @@ spec_strategy(void *v)
struct buf *bp = ap->a_bp;
int maj = major(bp->b_dev);
if (LIST_FIRST(&bp->b_dep) != NULL)
buf_start(bp);
(*bdevsw[maj].d_strategy)(bp);
return (0);
}


@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket.c,v 1.315 2024/01/26 18:24:23 mvs Exp $ */
/* $OpenBSD: uipc_socket.c,v 1.316 2024/02/03 22:50:08 mvs Exp $ */
/* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
/*
@ -72,26 +72,20 @@ int filt_soread(struct knote *kn, long hint);
void filt_sowdetach(struct knote *kn);
int filt_sowrite(struct knote *kn, long hint);
int filt_soexcept(struct knote *kn, long hint);
int filt_solisten(struct knote *kn, long hint);
int filt_somodify(struct kevent *kev, struct knote *kn);
int filt_soprocess(struct knote *kn, struct kevent *kev);
const struct filterops solisten_filtops = {
.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
.f_attach = NULL,
.f_detach = filt_sordetach,
.f_event = filt_solisten,
.f_modify = filt_somodify,
.f_process = filt_soprocess,
};
int filt_sowmodify(struct kevent *kev, struct knote *kn);
int filt_sowprocess(struct knote *kn, struct kevent *kev);
int filt_sormodify(struct kevent *kev, struct knote *kn);
int filt_sorprocess(struct knote *kn, struct kevent *kev);
const struct filterops soread_filtops = {
.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
.f_attach = NULL,
.f_detach = filt_sordetach,
.f_event = filt_soread,
.f_modify = filt_somodify,
.f_process = filt_soprocess,
.f_modify = filt_sormodify,
.f_process = filt_sorprocess,
};
const struct filterops sowrite_filtops = {
@ -99,8 +93,8 @@ const struct filterops sowrite_filtops = {
.f_attach = NULL,
.f_detach = filt_sowdetach,
.f_event = filt_sowrite,
.f_modify = filt_somodify,
.f_process = filt_soprocess,
.f_modify = filt_sowmodify,
.f_process = filt_sowprocess,
};
const struct filterops soexcept_filtops = {
@ -108,18 +102,8 @@ const struct filterops soexcept_filtops = {
.f_attach = NULL,
.f_detach = filt_sordetach,
.f_event = filt_soexcept,
.f_modify = filt_somodify,
.f_process = filt_soprocess,
};
void klist_soassertlk(void *);
int klist_solock(void *);
void klist_sounlock(void *, int);
const struct klistops socket_klistops = {
.klo_assertlk = klist_soassertlk,
.klo_lock = klist_solock,
.klo_unlock = klist_sounlock,
.f_modify = filt_sormodify,
.f_process = filt_sorprocess,
};
#ifndef SOMINCONN
@ -158,8 +142,10 @@ soalloc(const struct domain *dp, int wait)
return (NULL);
rw_init_flags(&so->so_lock, dp->dom_name, RWL_DUPOK);
refcnt_init(&so->so_refcnt);
klist_init(&so->so_rcv.sb_klist, &socket_klistops, so);
klist_init(&so->so_snd.sb_klist, &socket_klistops, so);
mtx_init(&so->so_rcv.sb_mtx, IPL_MPFLOOR);
mtx_init(&so->so_snd.sb_mtx, IPL_MPFLOOR);
klist_init_mutex(&so->so_rcv.sb_klist, &so->so_rcv.sb_mtx);
klist_init_mutex(&so->so_snd.sb_klist, &so->so_snd.sb_mtx);
sigio_init(&so->so_sigio);
TAILQ_INIT(&so->so_q0);
TAILQ_INIT(&so->so_q);
@ -1757,7 +1743,7 @@ somove(struct socket *so, int wait)
void
sorwakeup(struct socket *so)
{
soassertlocked(so);
soassertlocked_readonly(so);
#ifdef SOCKET_SPLICE
if (so->so_rcv.sb_flags & SB_SPLICE) {
@ -1785,7 +1771,7 @@ sorwakeup(struct socket *so)
void
sowwakeup(struct socket *so)
{
soassertlocked(so);
soassertlocked_readonly(so);
#ifdef SOCKET_SPLICE
if (so->so_snd.sb_flags & SB_SPLICE)
@ -2134,7 +2120,46 @@ void
sohasoutofband(struct socket *so)
{
pgsigio(&so->so_sigio, SIGURG, 0);
knote_locked(&so->so_rcv.sb_klist, 0);
knote(&so->so_rcv.sb_klist, 0);
}
void
sofilt_lock(struct socket *so, struct sockbuf *sb)
{
switch (so->so_proto->pr_domain->dom_family) {
case PF_INET:
case PF_INET6:
NET_LOCK_SHARED();
break;
default:
rw_enter_write(&so->so_lock);
break;
}
mtx_enter(&sb->sb_mtx);
}
void
sofilt_unlock(struct socket *so, struct sockbuf *sb)
{
mtx_leave(&sb->sb_mtx);
switch (so->so_proto->pr_domain->dom_family) {
case PF_INET:
case PF_INET6:
NET_UNLOCK_SHARED();
break;
default:
rw_exit_write(&so->so_lock);
break;
}
}
static inline void
sofilt_assert_locked(struct socket *so, struct sockbuf *sb)
{
MUTEX_ASSERT_LOCKED(&sb->sb_mtx);
soassertlocked_readonly(so);
}
int
@ -2143,13 +2168,9 @@ soo_kqfilter(struct file *fp, struct knote *kn)
struct socket *so = kn->kn_fp->f_data;
struct sockbuf *sb;
solock(so);
switch (kn->kn_filter) {
case EVFILT_READ:
if (so->so_options & SO_ACCEPTCONN)
kn->kn_fop = &solisten_filtops;
else
kn->kn_fop = &soread_filtops;
kn->kn_fop = &soread_filtops;
sb = &so->so_rcv;
break;
case EVFILT_WRITE:
@ -2161,12 +2182,10 @@ soo_kqfilter(struct file *fp, struct knote *kn)
sb = &so->so_rcv;
break;
default:
sounlock(so);
return (EINVAL);
}
klist_insert_locked(&sb->sb_klist, kn);
sounlock(so);
klist_insert(&sb->sb_klist, kn);
return (0);
}
@ -2185,7 +2204,23 @@ filt_soread(struct knote *kn, long hint)
struct socket *so = kn->kn_fp->f_data;
int rv = 0;
soassertlocked(so);
sofilt_assert_locked(so, &so->so_rcv);
if (so->so_options & SO_ACCEPTCONN) {
kn->kn_data = so->so_qlen;
rv = (kn->kn_data != 0);
if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) {
if (so->so_state & SS_ISDISCONNECTED) {
kn->kn_flags |= __EV_HUP;
rv = 1;
} else {
rv = soreadable(so);
}
}
return rv;
}
kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
@ -2226,7 +2261,7 @@ filt_sowrite(struct knote *kn, long hint)
struct socket *so = kn->kn_fp->f_data;
int rv;
soassertlocked(so);
sofilt_assert_locked(so, &so->so_snd);
kn->kn_data = sbspace(so, &so->so_snd);
if (so->so_snd.sb_state & SS_CANTSENDMORE) {
@ -2257,7 +2292,7 @@ filt_soexcept(struct knote *kn, long hint)
struct socket *so = kn->kn_fp->f_data;
int rv = 0;
soassertlocked(so);
sofilt_assert_locked(so, &so->so_rcv);
#ifdef SOCKET_SPLICE
if (isspliced(so)) {
@ -2283,77 +2318,55 @@ filt_soexcept(struct knote *kn, long hint)
}
int
filt_solisten(struct knote *kn, long hint)
{
struct socket *so = kn->kn_fp->f_data;
int active;
soassertlocked(so);
kn->kn_data = so->so_qlen;
active = (kn->kn_data != 0);
if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) {
if (so->so_state & SS_ISDISCONNECTED) {
kn->kn_flags |= __EV_HUP;
active = 1;
} else {
active = soreadable(so);
}
}
return (active);
}
int
filt_somodify(struct kevent *kev, struct knote *kn)
filt_sowmodify(struct kevent *kev, struct knote *kn)
{
struct socket *so = kn->kn_fp->f_data;
int rv;
solock(so);
sofilt_lock(so, &so->so_snd);
rv = knote_modify(kev, kn);
sounlock(so);
sofilt_unlock(so, &so->so_snd);
return (rv);
}
int
filt_soprocess(struct knote *kn, struct kevent *kev)
filt_sowprocess(struct knote *kn, struct kevent *kev)
{
struct socket *so = kn->kn_fp->f_data;
int rv;
solock(so);
sofilt_lock(so, &so->so_snd);
rv = knote_process(kn, kev);
sounlock(so);
sofilt_unlock(so, &so->so_snd);
return (rv);
}
void
klist_soassertlk(void *arg)
int
filt_sormodify(struct kevent *kev, struct knote *kn)
{
struct socket *so = arg;
struct socket *so = kn->kn_fp->f_data;
int rv;
soassertlocked(so);
sofilt_lock(so, &so->so_rcv);
rv = knote_modify(kev, kn);
sofilt_unlock(so, &so->so_rcv);
return (rv);
}
int
klist_solock(void *arg)
filt_sorprocess(struct knote *kn, struct kevent *kev)
{
struct socket *so = arg;
struct socket *so = kn->kn_fp->f_data;
int rv;
solock(so);
return (1);
}
sofilt_lock(so, &so->so_rcv);
rv = knote_process(kn, kev);
sofilt_unlock(so, &so->so_rcv);
void
klist_sounlock(void *arg, int ls)
{
struct socket *so = arg;
sounlock(so);
return (rv);
}
#ifdef DDB


@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket2.c,v 1.140 2024/01/11 14:15:11 bluhm Exp $ */
/* $OpenBSD: uipc_socket2.c,v 1.141 2024/02/03 22:50:08 mvs Exp $ */
/* $NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $ */
/*
@ -439,7 +439,7 @@ sounlock_shared(struct socket *so)
}
void
soassertlocked(struct socket *so)
soassertlocked_readonly(struct socket *so)
{
switch (so->so_proto->pr_domain->dom_family) {
case PF_INET:
@ -452,6 +452,27 @@ soassertlocked(struct socket *so)
}
}
void
soassertlocked(struct socket *so)
{
switch (so->so_proto->pr_domain->dom_family) {
case PF_INET:
case PF_INET6:
if (rw_status(&netlock) == RW_READ) {
NET_ASSERT_LOCKED();
if (splassert_ctl > 0 && pru_locked(so) == 0 &&
rw_status(&so->so_lock) != RW_WRITE)
splassert_fail(0, RW_WRITE, __func__);
} else
NET_ASSERT_LOCKED_EXCLUSIVE();
break;
default:
rw_assert_wrlock(&so->so_lock);
break;
}
}
int
sosleep_nsec(struct socket *so, void *ident, int prio, const char *wmesg,
uint64_t nsecs)
@ -489,46 +510,62 @@ sbwait(struct socket *so, struct sockbuf *sb)
soassertlocked(so);
mtx_enter(&sb->sb_mtx);
sb->sb_flags |= SB_WAIT;
mtx_leave(&sb->sb_mtx);
return sosleep_nsec(so, &sb->sb_cc, prio, "netio", sb->sb_timeo_nsecs);
}
int
sblock(struct socket *so, struct sockbuf *sb, int flags)
{
int error, prio = PSOCK;
int error = 0, prio = PSOCK;
soassertlocked(so);
mtx_enter(&sb->sb_mtx);
if ((sb->sb_flags & SB_LOCK) == 0) {
sb->sb_flags |= SB_LOCK;
return (0);
goto out;
}
if ((flags & SBL_WAIT) == 0) {
error = EWOULDBLOCK;
goto out;
}
if ((flags & SBL_WAIT) == 0)
return (EWOULDBLOCK);
if (!(flags & SBL_NOINTR || sb->sb_flags & SB_NOINTR))
prio |= PCATCH;
while (sb->sb_flags & SB_LOCK) {
sb->sb_flags |= SB_WANT;
mtx_leave(&sb->sb_mtx);
error = sosleep_nsec(so, &sb->sb_flags, prio, "netlck", INFSLP);
if (error)
return (error);
mtx_enter(&sb->sb_mtx);
}
sb->sb_flags |= SB_LOCK;
return (0);
out:
mtx_leave(&sb->sb_mtx);
return (error);
}
void
sbunlock(struct socket *so, struct sockbuf *sb)
{
soassertlocked(so);
int dowakeup = 0;
mtx_enter(&sb->sb_mtx);
sb->sb_flags &= ~SB_LOCK;
if (sb->sb_flags & SB_WANT) {
sb->sb_flags &= ~SB_WANT;
wakeup(&sb->sb_flags);
dowakeup = 1;
}
mtx_leave(&sb->sb_mtx);
if (dowakeup)
wakeup(&sb->sb_flags);
}
/*
@ -539,15 +576,24 @@ sbunlock(struct socket *so, struct sockbuf *sb)
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
soassertlocked(so);
int dowakeup = 0, dopgsigio = 0;
mtx_enter(&sb->sb_mtx);
if (sb->sb_flags & SB_WAIT) {
sb->sb_flags &= ~SB_WAIT;
wakeup(&sb->sb_cc);
dowakeup = 1;
}
if (sb->sb_flags & SB_ASYNC)
pgsigio(&so->so_sigio, SIGIO, 0);
dopgsigio = 1;
knote_locked(&sb->sb_klist, 0);
mtx_leave(&sb->sb_mtx);
if (dowakeup)
wakeup(&sb->sb_cc);
if (dopgsigio)
pgsigio(&so->so_sigio, SIGIO, 0);
}
/*


@ -1,4 +1,4 @@
/* $OpenBSD: uipc_syscalls.c,v 1.216 2024/01/03 11:07:04 bluhm Exp $ */
/* $OpenBSD: uipc_syscalls.c,v 1.217 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: uipc_syscalls.c,v 1.19 1996/02/09 19:00:48 christos Exp $ */
/*
@ -326,7 +326,7 @@ doaccept(struct proc *p, int sock, struct sockaddr *name, socklen_t *anamelen,
: (flags & SOCK_NONBLOCK ? FNONBLOCK : 0);
/* connection has been removed from the listen queue */
knote_locked(&head->so_rcv.sb_klist, 0);
knote(&head->so_rcv.sb_klist, 0);
if (persocket)
sounlock(head);


@ -1,4 +1,4 @@
/* $OpenBSD: vfs_bio.c,v 1.212 2023/04/26 15:13:52 beck Exp $ */
/* $OpenBSD: vfs_bio.c,v 1.213 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $ */
/*
@ -65,7 +65,6 @@ int fliphigh;
int nobuffers;
int needbuffer;
struct bio_ops bioops;
/* private bufcache functions */
void bufcache_init(void);
@ -120,8 +119,6 @@ buf_put(struct buf *bp)
if (bp->b_vnbufs.le_next != NOLIST &&
bp->b_vnbufs.le_next != (void *)-1)
panic("buf_put: still on the vnode list");
if (!LIST_EMPTY(&bp->b_dep))
panic("buf_put: b_dep is not empty");
#endif
LIST_REMOVE(bp, b_list);
@ -879,13 +876,6 @@ brelse(struct buf *bp)
if (bp->b_data != NULL)
KASSERT(bp->b_bufsize > 0);
/*
* softdep is basically incompatible with not caching buffers
* that have dependencies, so this buffer must be cached
*/
if (LIST_FIRST(&bp->b_dep) != NULL)
CLR(bp->b_flags, B_NOCACHE);
/*
* Determine which queue the buffer should be on, then put it there.
*/
@ -904,9 +894,6 @@ brelse(struct buf *bp)
* If the buffer is invalid, free it now rather than leaving
* it in a queue and wasting memory.
*/
if (LIST_FIRST(&bp->b_dep) != NULL)
buf_deallocate(bp);
if (ISSET(bp->b_flags, B_DELWRI)) {
CLR(bp->b_flags, B_DELWRI);
}
@ -1150,7 +1137,6 @@ buf_get(struct vnode *vp, daddr_t blkno, size_t size)
bp->b_freelist.tqe_next = NOLIST;
bp->b_dev = NODEV;
LIST_INIT(&bp->b_dep);
bp->b_bcount = size;
buf_acquire_nomap(bp);
@ -1243,16 +1229,6 @@ buf_daemon(void *arg)
if (!ISSET(bp->b_flags, B_DELWRI))
panic("Clean buffer on dirty queue");
#endif
if (LIST_FIRST(&bp->b_dep) != NULL &&
!ISSET(bp->b_flags, B_DEFERRED) &&
buf_countdeps(bp, 0, 0)) {
SET(bp->b_flags, B_DEFERRED);
s = splbio();
bufcache_release(bp);
buf_release(bp);
continue;
}
bawrite(bp);
pushed++;
@ -1321,9 +1297,6 @@ biodone(struct buf *bp)
if (bp->b_bq)
bufq_done(bp->b_bq, bp);
if (LIST_FIRST(&bp->b_dep) != NULL)
buf_complete(bp);
if (!ISSET(bp->b_flags, B_READ)) {
CLR(bp->b_flags, B_WRITEINPROG);
vwakeup(bp->b_vp);


@ -1,4 +1,4 @@
/* $OpenBSD: vfs_subr.c,v 1.318 2022/12/26 19:25:49 miod Exp $ */
/* $OpenBSD: vfs_subr.c,v 1.319 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */
/*
@ -1819,6 +1819,10 @@ vfs_syncwait(struct proc *p, int verbose)
* With soft updates, some buffers that are
* written will be remarked as dirty until other
* buffers are written.
*
* XXX here be dragons. this should really go away
* but should be carefully made to go away on it's
* own with testing.. XXX
*/
if (bp->b_flags & B_DELWRI) {
s = splbio();
@ -2249,18 +2253,14 @@ vfs_buf_print(void *b, int full,
bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
" data %p saveaddr %p dep %p iodone %p\n",
" data %p saveaddr %p iodone %p\n",
bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
bp->b_data, bp->b_saveaddr,
LIST_FIRST(&bp->b_dep), bp->b_iodone);
bp->b_iodone);
(*pr)(" dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
#ifdef FFS_SOFTUPDATES
if (full)
softdep_print(bp, full, pr);
#endif
}
const char *vtypes[] = { VTYPE_NAMES };


@ -1,4 +1,4 @@
/* $OpenBSD: vfs_sync.c,v 1.69 2023/05/25 07:45:33 claudio Exp $ */
/* $OpenBSD: vfs_sync.c,v 1.70 2024/02/03 18:51:58 beck Exp $ */
/*
* Portions of this code are:
@ -50,10 +50,6 @@
#include <sys/malloc.h>
#include <sys/time.h>
#ifdef FFS_SOFTUPDATES
int softdep_process_worklist(struct mount *);
#endif
/*
* The workitem queue.
*/
@ -62,9 +58,6 @@ int softdep_process_worklist(struct mount *);
int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
int syncdelay = SYNCER_DEFAULT; /* time to delay syncing vnodes */
int rushjob = 0; /* number of slots to run ASAP */
int stat_rush_requests = 0; /* number of rush requests */
int syncer_delayno = 0;
long syncer_mask;
LIST_HEAD(synclist, vnode);
@ -198,28 +191,6 @@ syncer_thread(void *arg)
splx(s);
#ifdef FFS_SOFTUPDATES
/*
* Do soft update processing.
*/
softdep_process_worklist(NULL);
#endif
/*
* The variable rushjob allows the kernel to speed up the
* processing of the filesystem syncer process. A rushjob
* value of N tells the filesystem syncer to process the next
* N seconds worth of work on its queue ASAP. Currently rushjob
* is used by the soft update code to speed up the filesystem
* syncer process when the incore state is getting so far
* ahead of the disk that the kernel memory pool is being
* threatened with exhaustion.
*/
if (rushjob > 0) {
rushjob -= 1;
continue;
}
/*
* If it has taken us less than a second to process the
* current work, then wait. Otherwise start right over
@ -236,24 +207,6 @@ syncer_thread(void *arg)
}
}
/*
* Request the syncer daemon to speed up its work.
* We never push it to speed up more than half of its
* normal turn time, otherwise it could take over the cpu.
*/
int
speedup_syncer(void)
{
if (syncerproc)
wakeup_one(&syncer_chan);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;
return 1;
}
return 0;
}
/* Routine to create and manage a filesystem syncer vnode. */
int sync_fsync(void *);
int sync_inactive(void *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: vfs_syscalls.c,v 1.362 2023/07/05 15:13:28 beck Exp $ */
/* $OpenBSD: vfs_syscalls.c,v 1.363 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: vfs_syscalls.c,v 1.71 1996/04/23 10:29:02 mycroft Exp $ */
/*
@ -2894,10 +2894,6 @@ sys_fsync(struct proc *p, void *v, register_t *retval)
vp = fp->f_data;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef FFS_SOFTUPDATES
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
error = softdep_fsync(vp);
#endif
VOP_UNLOCK(vp);
FRELE(fp, p);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: fifo_vnops.c,v 1.102 2023/03/08 04:43:08 guenther Exp $ */
/* $OpenBSD: fifo_vnops.c,v 1.103 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: fifo_vnops.c,v 1.18 1996/03/16 23:52:42 christos Exp $ */
/*
@ -105,16 +105,18 @@ int filt_fiforead(struct knote *kn, long hint);
void filt_fifowdetach(struct knote *kn);
int filt_fifowrite(struct knote *kn, long hint);
int filt_fifoexcept(struct knote *kn, long hint);
int filt_fifomodify(struct kevent *kev, struct knote *kn);
int filt_fifoprocess(struct knote *kn, struct kevent *kev);
int filt_fiformodify(struct kevent *kev, struct knote *kn);
int filt_fiforprocess(struct knote *kn, struct kevent *kev);
int filt_fifowmodify(struct kevent *kev, struct knote *kn);
int filt_fifowprocess(struct knote *kn, struct kevent *kev);
const struct filterops fiforead_filtops = {
.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
.f_attach = NULL,
.f_detach = filt_fifordetach,
.f_event = filt_fiforead,
.f_modify = filt_fifomodify,
.f_process = filt_fifoprocess,
.f_modify = filt_fiformodify,
.f_process = filt_fiforprocess,
};
const struct filterops fifowrite_filtops = {
@ -122,8 +124,8 @@ const struct filterops fifowrite_filtops = {
.f_attach = NULL,
.f_detach = filt_fifowdetach,
.f_event = filt_fifowrite,
.f_modify = filt_fifomodify,
.f_process = filt_fifoprocess,
.f_modify = filt_fifowmodify,
.f_process = filt_fifowprocess,
};
const struct filterops fifoexcept_filtops = {
@ -131,8 +133,8 @@ const struct filterops fifoexcept_filtops = {
.f_attach = NULL,
.f_detach = filt_fifordetach,
.f_event = filt_fifoexcept,
.f_modify = filt_fifomodify,
.f_process = filt_fifoprocess,
.f_modify = filt_fiformodify,
.f_process = filt_fiforprocess,
};
/*
@ -517,6 +519,7 @@ filt_fiforead(struct knote *kn, long hint)
int rv;
soassertlocked(so);
MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
kn->kn_data = so->so_rcv.sb_cc;
if (so->so_rcv.sb_state & SS_CANTRCVMORE) {
@ -551,6 +554,7 @@ filt_fifowrite(struct knote *kn, long hint)
int rv;
soassertlocked(so);
MUTEX_ASSERT_LOCKED(&so->so_snd.sb_mtx);
kn->kn_data = sbspace(so, &so->so_snd);
if (so->so_snd.sb_state & SS_CANTSENDMORE) {
@ -571,6 +575,7 @@ filt_fifoexcept(struct knote *kn, long hint)
int rv = 0;
soassertlocked(so);
MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
if (kn->kn_flags & __EV_POLL) {
if (so->so_state & SS_ISDISCONNECTED) {
@ -585,26 +590,60 @@ filt_fifoexcept(struct knote *kn, long hint)
}
int
filt_fifomodify(struct kevent *kev, struct knote *kn)
filt_fiformodify(struct kevent *kev, struct knote *kn)
{
struct socket *so = kn->kn_hook;
int rv;
solock(so);
mtx_enter(&so->so_rcv.sb_mtx);
rv = knote_modify(kev, kn);
mtx_leave(&so->so_rcv.sb_mtx);
sounlock(so);
return (rv);
}
int
filt_fifoprocess(struct knote *kn, struct kevent *kev)
filt_fiforprocess(struct knote *kn, struct kevent *kev)
{
struct socket *so = kn->kn_hook;
int rv;
solock(so);
mtx_enter(&so->so_rcv.sb_mtx);
rv = knote_process(kn, kev);
mtx_leave(&so->so_rcv.sb_mtx);
sounlock(so);
return (rv);
}
int
filt_fifowmodify(struct kevent *kev, struct knote *kn)
{
struct socket *so = kn->kn_hook;
int rv;
solock(so);
mtx_enter(&so->so_snd.sb_mtx);
rv = knote_modify(kev, kn);
mtx_leave(&so->so_snd.sb_mtx);
sounlock(so);
return (rv);
}
int
filt_fifowprocess(struct knote *kn, struct kevent *kev)
{
struct socket *so = kn->kn_hook;
int rv;
solock(so);
mtx_enter(&so->so_snd.sb_mtx);
rv = knote_process(kn, kev);
mtx_leave(&so->so_snd.sb_mtx);
sounlock(so);
return (rv);
}
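For orientation, the read- and write-side filterops above are exactly what an ordinary kqueue(2) registration on a FIFO ends up exercising. A minimal userland sketch (standard API usage, not part of this diff):

/*
 * Watch a FIFO for readability; EVFILT_READ on a FIFO fd routes
 * through fiforead_filtops and thus filt_fiforprocess() above.
 */
#include <sys/event.h>
#include <fcntl.h>

int
watch_fifo(const char *path)
{
	struct kevent kev;
	int fd, kq;

	if ((fd = open(path, O_RDONLY | O_NONBLOCK)) == -1)
		return (-1);
	if ((kq = kqueue()) == -1)
		return (-1);
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}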

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip_divert.c,v 1.92 2023/09/16 09:33:27 mpi Exp $ */
/* $OpenBSD: ip_divert.c,v 1.93 2024/02/03 22:50:09 mvs Exp $ */
/*
* Copyright (c) 2009 Michele Marchetto <michele@openbsd.org>
@ -67,6 +67,7 @@ const struct pr_usrreqs divert_usrreqs = {
.pru_detach = divert_detach,
.pru_lock = divert_lock,
.pru_unlock = divert_unlock,
.pru_locked = divert_locked,
.pru_bind = divert_bind,
.pru_shutdown = divert_shutdown,
.pru_send = divert_send,
@ -313,6 +314,14 @@ divert_unlock(struct socket *so)
mtx_leave(&inp->inp_mtx);
}
int
divert_locked(struct socket *so)
{
struct inpcb *inp = sotoinpcb(so);
return mtx_owned(&inp->inp_mtx);
}
int
divert_bind(struct socket *so, struct mbuf *addr, struct proc *p)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip_divert.h,v 1.24 2022/10/17 14:49:02 mvs Exp $ */
/* $OpenBSD: ip_divert.h,v 1.25 2024/02/03 22:50:09 mvs Exp $ */
/*
* Copyright (c) 2009 Michele Marchetto <michele@openbsd.org>
@ -74,6 +74,7 @@ int divert_attach(struct socket *, int, int);
int divert_detach(struct socket *);
void divert_lock(struct socket *);
void divert_unlock(struct socket *);
int divert_locked(struct socket *);
int divert_bind(struct socket *, struct mbuf *, struct proc *);
int divert_shutdown(struct socket *);
int divert_send(struct socket *, struct mbuf *, struct mbuf *,

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip_var.h,v 1.110 2023/11/26 22:08:10 bluhm Exp $ */
/* $OpenBSD: ip_var.h,v 1.111 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: ip_var.h,v 1.16 1996/02/13 23:43:20 christos Exp $ */
/*
@ -260,6 +260,7 @@ int rip_attach(struct socket *, int, int);
int rip_detach(struct socket *);
void rip_lock(struct socket *);
void rip_unlock(struct socket *);
int rip_locked(struct socket *);
int rip_bind(struct socket *, struct mbuf *, struct proc *);
int rip_connect(struct socket *, struct mbuf *);
int rip_disconnect(struct socket *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: raw_ip.c,v 1.154 2024/01/21 01:17:20 bluhm Exp $ */
/* $OpenBSD: raw_ip.c,v 1.155 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: raw_ip.c,v 1.25 1996/02/18 18:58:33 christos Exp $ */
/*
@ -108,6 +108,7 @@ const struct pr_usrreqs rip_usrreqs = {
.pru_detach = rip_detach,
.pru_lock = rip_lock,
.pru_unlock = rip_unlock,
.pru_locked = rip_locked,
.pru_bind = rip_bind,
.pru_connect = rip_connect,
.pru_disconnect = rip_disconnect,
@ -524,6 +525,14 @@ rip_unlock(struct socket *so)
mtx_leave(&inp->inp_mtx);
}
int
rip_locked(struct socket *so)
{
struct inpcb *inp = sotoinpcb(so);
return mtx_owned(&inp->inp_mtx);
}
int
rip_bind(struct socket *so, struct mbuf *nam, struct proc *p)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: udp_usrreq.c,v 1.316 2024/01/28 20:34:25 bluhm Exp $ */
/* $OpenBSD: udp_usrreq.c,v 1.317 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: udp_usrreq.c,v 1.28 1996/03/16 23:54:03 christos Exp $ */
/*
@ -127,6 +127,7 @@ const struct pr_usrreqs udp_usrreqs = {
.pru_detach = udp_detach,
.pru_lock = udp_lock,
.pru_unlock = udp_unlock,
.pru_locked = udp_locked,
.pru_bind = udp_bind,
.pru_connect = udp_connect,
.pru_disconnect = udp_disconnect,
@ -143,6 +144,7 @@ const struct pr_usrreqs udp6_usrreqs = {
.pru_detach = udp_detach,
.pru_lock = udp_lock,
.pru_unlock = udp_unlock,
.pru_locked = udp_locked,
.pru_bind = udp_bind,
.pru_connect = udp_connect,
.pru_disconnect = udp_disconnect,
@ -1156,6 +1158,14 @@ udp_unlock(struct socket *so)
mtx_leave(&inp->inp_mtx);
}
int
udp_locked(struct socket *so)
{
struct inpcb *inp = sotoinpcb(so);
return mtx_owned(&inp->inp_mtx);
}
int
udp_bind(struct socket *so, struct mbuf *addr, struct proc *p)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: udp_var.h,v 1.50 2024/01/10 16:44:30 bluhm Exp $ */
/* $OpenBSD: udp_var.h,v 1.51 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: udp_var.h,v 1.12 1996/02/13 23:44:41 christos Exp $ */
/*
@ -147,6 +147,7 @@ int udp_attach(struct socket *, int, int);
int udp_detach(struct socket *);
void udp_lock(struct socket *);
void udp_unlock(struct socket *);
int udp_locked(struct socket *);
int udp_bind(struct socket *, struct mbuf *, struct proc *);
int udp_connect(struct socket *, struct mbuf *);
int udp_disconnect(struct socket *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip6_divert.c,v 1.91 2024/01/01 18:52:09 bluhm Exp $ */
/* $OpenBSD: ip6_divert.c,v 1.92 2024/02/03 22:50:09 mvs Exp $ */
/*
* Copyright (c) 2009 Michele Marchetto <michele@openbsd.org>
@ -69,6 +69,7 @@ const struct pr_usrreqs divert6_usrreqs = {
.pru_detach = divert_detach,
.pru_lock = divert_lock,
.pru_unlock = divert_unlock,
.pru_locked = divert_locked,
.pru_bind = divert_bind,
.pru_shutdown = divert_shutdown,
.pru_send = divert6_send,

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip6_mroute.c,v 1.138 2023/12/06 09:27:17 bluhm Exp $ */
/* $OpenBSD: ip6_mroute.c,v 1.139 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: ip6_mroute.c,v 1.59 2003/12/10 09:28:38 itojun Exp $ */
/* $KAME: ip6_mroute.c,v 1.45 2001/03/25 08:38:51 itojun Exp $ */
@ -861,12 +861,12 @@ socket6_send(struct socket *so, struct mbuf *mm, struct sockaddr_in6 *src)
mtx_enter(&inp->inp_mtx);
ret = sbappendaddr(so, &so->so_rcv, sin6tosa(src), mm, NULL);
if (ret != 0)
sorwakeup(so);
mtx_leave(&inp->inp_mtx);
if (ret != 0) {
sorwakeup(so);
if (ret != 0)
return 0;
}
}
m_freem(mm);
return -1;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip6_var.h,v 1.109 2023/12/03 20:36:24 bluhm Exp $ */
/* $OpenBSD: ip6_var.h,v 1.110 2024/02/03 22:50:09 mvs Exp $ */
/* $KAME: ip6_var.h,v 1.33 2000/06/11 14:59:20 jinmei Exp $ */
/*
@ -353,6 +353,7 @@ int rip6_attach(struct socket *, int, int);
int rip6_detach(struct socket *);
void rip6_lock(struct socket *);
void rip6_unlock(struct socket *);
int rip6_locked(struct socket *);
int rip6_bind(struct socket *, struct mbuf *, struct proc *);
int rip6_connect(struct socket *, struct mbuf *);
int rip6_disconnect(struct socket *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: raw_ip6.c,v 1.179 2024/01/21 01:17:20 bluhm Exp $ */
/* $OpenBSD: raw_ip6.c,v 1.180 2024/02/03 22:50:09 mvs Exp $ */
/* $KAME: raw_ip6.c,v 1.69 2001/03/04 15:55:44 itojun Exp $ */
/*
@ -110,6 +110,7 @@ const struct pr_usrreqs rip6_usrreqs = {
.pru_detach = rip6_detach,
.pru_lock = rip6_lock,
.pru_unlock = rip6_unlock,
.pru_locked = rip6_locked,
.pru_bind = rip6_bind,
.pru_connect = rip6_connect,
.pru_disconnect = rip6_disconnect,
@ -653,6 +654,14 @@ rip6_unlock(struct socket *so)
mtx_leave(&inp->inp_mtx);
}
int
rip6_locked(struct socket *so)
{
struct inpcb *inp = sotoinpcb(so);
return mtx_owned(&inp->inp_mtx);
}
int
rip6_bind(struct socket *so, struct mbuf *nam, struct proc *p)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: buf.h,v 1.113 2022/09/01 05:24:51 jsg Exp $ */
/* $OpenBSD: buf.h,v 1.114 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: buf.h,v 1.25 1997/04/09 21:12:17 mycroft Exp $ */
/*
@ -51,12 +51,6 @@ struct vnode;
LIST_HEAD(bufhead, buf);
/*
* To avoid including <ufs/ffs/softdep.h>
*/
LIST_HEAD(workhead, worklist);
/*
* Buffer queues
*/
@ -122,20 +116,6 @@ union bufq_data {
struct bufq_nscan bufq_data_nscan;
};
/*
* These are currently used only by the soft dependency code, hence
* are stored once in a global variable. If other subsystems wanted
* to use these hooks, a pointer to a set of bio_ops could be added
* to each buffer.
*/
extern struct bio_ops {
void (*io_start)(struct buf *);
void (*io_complete)(struct buf *);
void (*io_deallocate)(struct buf *);
void (*io_movedeps)(struct buf *, struct buf *);
int (*io_countdeps)(struct buf *, int, int);
} bioops;
/* The buffer header describes an I/O operation in the kernel. */
struct buf {
RBT_ENTRY(buf) b_rbbufs; /* vnode "hash" tree */
@ -172,7 +152,6 @@ struct buf {
int b_dirtyend; /* Offset of end of dirty region. */
int b_validoff; /* Offset in buffer of valid region. */
int b_validend; /* Offset of end of valid region. */
struct workhead b_dep; /* List of filesystem dependencies. */
};
TAILQ_HEAD(bufqueue, buf);
@ -324,43 +303,6 @@ void buf_daemon(void *);
void buf_replacevnode(struct buf *, struct vnode *);
int bread_cluster(struct vnode *, daddr_t, int, struct buf **);
static __inline void
buf_start(struct buf *bp)
{
if (bioops.io_start)
(*bioops.io_start)(bp);
}
static __inline void
buf_complete(struct buf *bp)
{
if (bioops.io_complete)
(*bioops.io_complete)(bp);
}
static __inline void
buf_deallocate(struct buf *bp)
{
if (bioops.io_deallocate)
(*bioops.io_deallocate)(bp);
}
static __inline void
buf_movedeps(struct buf *bp, struct buf *bp2)
{
if (bioops.io_movedeps)
(*bioops.io_movedeps)(bp, bp2);
}
static __inline int
buf_countdeps(struct buf *bp, int i, int islocked)
{
if (bioops.io_countdeps)
return ((*bioops.io_countdeps)(bp, i, islocked));
else
return (0);
}
__END_DECLS
#endif /* _KERNEL */
#endif /* !_SYS_BUF_H_ */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mount.h,v 1.150 2023/07/05 15:13:28 beck Exp $ */
/* $OpenBSD: mount.h,v 1.151 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: mount.h,v 1.48 1996/02/18 11:55:47 fvdl Exp $ */
/*
@ -610,7 +610,6 @@ int vfs_export(struct mount *, struct netexport *, struct export_args *);
struct netcred *vfs_export_lookup(struct mount *, struct netexport *,
struct mbuf *);
int vfs_allocate_syncvnode(struct mount *);
int speedup_syncer(void);
int vfs_syncwait(struct proc *, int); /* sync and wait for complete */
void vfs_shutdown(struct proc *); /* unmount and sync file systems */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mutex.h,v 1.19 2023/12/01 14:37:22 bluhm Exp $ */
/* $OpenBSD: mutex.h,v 1.20 2024/02/03 22:50:09 mvs Exp $ */
/*
* Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@ -127,6 +127,9 @@ void mtx_leave(struct mutex *);
#define mtx_init(m, ipl) mtx_init_flags(m, ipl, NULL, 0)
#define mtx_owned(mtx) \
(((mtx)->mtx_owner == curcpu()) || panicstr || db_active)
#ifdef WITNESS
void _mtx_init_flags(struct mutex *, int, const char *, int,

View file

@ -1,4 +1,4 @@
/* $OpenBSD: proc.h,v 1.355 2024/01/19 01:43:27 bluhm Exp $ */
/* $OpenBSD: proc.h,v 1.356 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
/*-
@ -436,7 +436,6 @@ struct proc {
#define P_CONTINUED 0x00800000 /* Proc has continued from a stopped state. */
#define P_THREAD 0x04000000 /* Only a thread, not a real process */
#define P_SUSPSIG 0x08000000 /* Stopped from signal. */
#define P_SOFTDEP 0x10000000 /* Stuck processing softdep worklist */
#define P_CPUPEG 0x40000000 /* Do not move to another cpu. */
#define P_BITS \

View file

@ -1,4 +1,4 @@
/* $OpenBSD: protosw.h,v 1.64 2024/01/11 14:15:12 bluhm Exp $ */
/* $OpenBSD: protosw.h,v 1.65 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: protosw.h,v 1.10 1996/04/09 20:55:32 cgd Exp $ */
/*-
@ -69,6 +69,7 @@ struct pr_usrreqs {
int (*pru_detach)(struct socket *);
void (*pru_lock)(struct socket *);
void (*pru_unlock)(struct socket *);
int (*pru_locked)(struct socket *so);
int (*pru_bind)(struct socket *, struct mbuf *, struct proc *);
int (*pru_listen)(struct socket *);
int (*pru_connect)(struct socket *, struct mbuf *);
@ -296,6 +297,14 @@ pru_unlock(struct socket *so)
(*so->so_proto->pr_usrreqs->pru_unlock)(so);
}
static inline int
pru_locked(struct socket *so)
{
if (so->so_proto->pr_usrreqs->pru_locked)
return (*so->so_proto->pr_usrreqs->pru_locked)(so);
return (0);
}
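The hook gives MP-safe code a way to ask whether the protocol's own PCB mutex is held. A minimal sketch of a possible caller, using a hypothetical helper name (the real soassertlocked() body is outside this diff):

/*
 * Hypothetical: pass the assertion if the protocol's per-PCB
 * mutex is held, otherwise fall back to the socket-lock check.
 */
static inline void
so_assert_any_lock(struct socket *so)
{
	if (pru_locked(so))
		return;
	soassertlocked_readonly(so);
}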
static inline int
pru_bind(struct socket *so, struct mbuf *nam, struct proc *p)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: socketvar.h,v 1.121 2024/01/11 14:15:12 bluhm Exp $ */
/* $OpenBSD: socketvar.h,v 1.122 2024/02/03 22:50:09 mvs Exp $ */
/* $NetBSD: socketvar.h,v 1.18 1996/02/09 18:25:38 christos Exp $ */
/*-
@ -40,6 +40,7 @@
#include <sys/sigio.h> /* for struct sigio_ref */
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/refcnt.h>
@ -105,6 +106,7 @@ struct socket {
* Variables for socket buffering.
*/
struct sockbuf {
struct mutex sb_mtx;
/* The following fields are all zeroed on flush. */
#define sb_startzero sb_cc
u_long sb_cc; /* actual chars in buffer */
@ -174,6 +176,7 @@ struct socket {
#include <lib/libkern/libkern.h>
void soassertlocked(struct socket *);
void soassertlocked_readonly(struct socket *);
static inline void
soref(struct socket *so)
@ -200,9 +203,16 @@ sorele(struct socket *so)
static inline int
sb_notify(struct socket *so, struct sockbuf *sb)
{
int rv;
soassertlocked(so);
return ((sb->sb_flags & (SB_WAIT|SB_ASYNC|SB_SPLICE)) != 0 ||
mtx_enter(&sb->sb_mtx);
rv = ((sb->sb_flags & (SB_WAIT|SB_ASYNC|SB_SPLICE)) != 0 ||
!klist_empty(&sb->sb_klist));
mtx_leave(&sb->sb_mtx);
return rv;
}
/*
@ -211,10 +221,12 @@ sb_notify(struct socket *so, struct sockbuf *sb)
* still be negative (cc > hiwat or mbcnt > mbmax). Should detect
* overflow and return 0.
*/
static inline long
sbspace(struct socket *so, struct sockbuf *sb)
{
soassertlocked(so);
soassertlocked_readonly(so);
return lmin(sb->sb_hiwat - sb->sb_cc, sb->sb_mbmax - sb->sb_mbcnt);
}
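The truncated comment above notes that the result may still be negative and that overflow "should" be detected; a sketch of the checked variant it alludes to (hypothetical, not part of this diff):

/* Hypothetical overflow-checked variant of sbspace(). */
static inline long
sbspace_checked(struct socket *so, struct sockbuf *sb)
{
	long space;

	soassertlocked_readonly(so);
	space = lmin(sb->sb_hiwat - sb->sb_cc,
	    sb->sb_mbmax - sb->sb_mbcnt);
	return (space < 0 ? 0 : space);
}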
@ -230,7 +242,7 @@ sbspace(struct socket *so, struct sockbuf *sb)
static inline int
soreadable(struct socket *so)
{
soassertlocked(so);
soassertlocked_readonly(so);
if (isspliced(so))
return 0;
return (so->so_rcv.sb_state & SS_CANTRCVMORE) || so->so_qlen ||
@ -241,7 +253,7 @@ soreadable(struct socket *so)
static inline int
sowriteable(struct socket *so)
{
soassertlocked(so);
soassertlocked_readonly(so);
return ((sbspace(so, &so->so_snd) >= so->so_snd.sb_lowat &&
((so->so_state & SS_ISCONNECTED) ||
(so->so_proto->pr_flags & PR_CONNREQUIRED)==0)) ||

View file

@ -1,4 +1,4 @@
/* $OpenBSD: vnode.h,v 1.169 2023/09/08 20:00:28 mvs Exp $ */
/* $OpenBSD: vnode.h,v 1.170 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: vnode.h,v 1.38 1996/02/29 20:59:05 cgd Exp $ */
/*
@ -643,7 +643,6 @@ void vn_syncer_add_to_worklist(struct vnode *, int);
/* misc */
int vn_isdisk(struct vnode *, int *);
int softdep_fsync(struct vnode *);
int getvnode(struct proc *, int, struct file **);
/* uvm */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_alloc.c,v 1.114 2021/03/11 13:31:35 jsg Exp $ */
/* $OpenBSD: ffs_alloc.c,v 1.115 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ffs_alloc.c,v 1.11 1996/05/11 18:27:09 mycroft Exp $ */
/*
@ -291,8 +291,7 @@ ffs_realloccg(struct inode *ip, daddr_t lbprev, daddr_t bpref, int osize,
goto nospace;
(void) uvm_vnp_uncache(ITOV(ip));
if (!DOINGSOFTDEP(ITOV(ip)))
ffs_blkfree(ip, bprev, (long)osize);
ffs_blkfree(ip, bprev, (long)osize);
if (nsize < request)
ffs_blkfree(ip, bno + numfrags(fs, nsize),
(long)(request - nsize));
@ -921,8 +920,6 @@ ffs_fragextend(struct inode *ip, u_int cg, daddr_t bprev, int osize, int nsize)
fs->fs_cs(fs, cg).cs_nffree--;
}
fs->fs_fmod = 1;
if (DOINGSOFTDEP(ITOV(ip)))
softdep_setup_blkmapdep(bp, fs, bprev);
bdwrite(bp);
return (bprev);
@ -1015,8 +1012,6 @@ ffs_alloccg(struct inode *ip, u_int cg, daddr_t bpref, int size)
cgp->cg_frsum[allocsiz - frags]++;
blkno = cgbase(fs, cg) + bno;
if (DOINGSOFTDEP(ITOV(ip)))
softdep_setup_blkmapdep(bp, fs, blkno);
bdwrite(bp);
return (blkno);
}
@ -1082,9 +1077,6 @@ gotit:
fs->fs_fmod = 1;
blkno = cgbase(fs, cgp->cg_cgx) + bno;
if (DOINGSOFTDEP(ITOV(ip)))
softdep_setup_blkmapdep(bp, fs, blkno);
return (blkno);
}
@ -1220,9 +1212,6 @@ gotit:
}
#endif /* FFS2 */
if (DOINGSOFTDEP(ITOV(ip)))
softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
setbit(cg_inosused(cgp), ipref);
/* Update the counters we keep on free inodes */
@ -1363,13 +1352,6 @@ ffs_blkfree(struct inode *ip, daddr_t bno, long size)
int
ffs_inode_free(struct inode *pip, ufsino_t ino, mode_t mode)
{
struct vnode *pvp = ITOV(pip);
if (DOINGSOFTDEP(pvp)) {
softdep_freefile(pvp, ino, mode);
return (0);
}
return (ffs_freefile(pip, ino, mode));
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_balloc.c,v 1.45 2019/07/19 00:24:31 cheloha Exp $ */
/* $OpenBSD: ffs_balloc.c,v 1.46 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ffs_balloc.c,v 1.3 1996/02/09 22:22:21 christos Exp $ */
/*
@ -107,10 +107,6 @@ ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
osize, (int)fs->fs_bsize, cred, bpp, &newb);
if (error)
return (error);
if (DOINGSOFTDEP(vp))
softdep_setup_allocdirect(ip, nb, newb,
ip->i_ffs1_db[nb], fs->fs_bsize, osize,
bpp ? *bpp : NULL);
ip->i_ffs1_size = lblktosize(fs, nb + 1);
uvm_vnp_setsize(vp, ip->i_ffs1_size);
@ -179,10 +175,6 @@ ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
osize, nsize, cred, bpp, &newb);
if (error)
return (error);
if (DOINGSOFTDEP(vp))
softdep_setup_allocdirect(ip, lbn,
newb, nb, nsize, osize,
bpp ? *bpp : NULL);
}
} else {
/*
@ -207,9 +199,6 @@ ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
if (flags & B_CLRBUF)
clrbuf(*bpp);
}
if (DOINGSOFTDEP(vp))
softdep_setup_allocdirect(ip, lbn, newb, 0,
nsize, 0, bpp ? *bpp : NULL);
}
ip->i_ffs1_db[lbn] = newb;
ip->i_flag |= IN_CHANGE | IN_UPDATE;
@ -247,18 +236,12 @@ ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
bp->b_blkno = fsbtodb(fs, nb);
clrbuf(bp);
if (DOINGSOFTDEP(vp)) {
softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
newb, 0, fs->fs_bsize, 0, bp);
bdwrite(bp);
} else {
/*
* Write synchronously so that indirect blocks
* never point at garbage.
*/
if ((error = bwrite(bp)) != 0)
goto fail;
}
/*
* Write synchronously so that indirect blocks
* never point at garbage.
*/
if ((error = bwrite(bp)) != 0)
goto fail;
allocib = &ip->i_ffs1_ib[indirs[0].in_off];
*allocib = nb;
ip->i_flag |= IN_CHANGE | IN_UPDATE;
@ -296,19 +279,13 @@ ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
nbp->b_blkno = fsbtodb(fs, nb);
clrbuf(nbp);
if (DOINGSOFTDEP(vp)) {
softdep_setup_allocindir_meta(nbp, ip, bp,
indirs[i - 1].in_off, nb);
bdwrite(nbp);
} else {
/*
* Write synchronously so that indirect blocks
* never point at garbage.
*/
if ((error = bwrite(nbp)) != 0) {
brelse(bp);
goto fail;
}
/*
* Write synchronously so that indirect blocks
* never point at garbage.
*/
if ((error = bwrite(nbp)) != 0) {
brelse(bp);
goto fail;
}
bap[indirs[i - 1].in_off] = nb;
if (allocib == NULL && unwindidx < 0)
@ -343,9 +320,6 @@ ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
clrbuf(nbp);
*bpp = nbp;
}
if (DOINGSOFTDEP(vp))
softdep_setup_allocindir_page(ip, lbn, bp,
indirs[i].in_off, nb, 0, bpp ? *bpp : NULL);
bap[indirs[i].in_off] = nb;
/*
* If required, write synchronously, otherwise use
@ -473,11 +447,6 @@ ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
if (error)
return (error);
if (DOINGSOFTDEP(vp))
softdep_setup_allocdirect(ip, nb, newb,
ip->i_ffs2_db[nb], fs->fs_bsize, osize,
bpp ? *bpp : NULL);
ip->i_ffs2_size = lblktosize(fs, nb + 1);
uvm_vnp_setsize(vp, ip->i_ffs2_size);
ip->i_ffs2_db[nb] = newb;
@ -550,11 +519,6 @@ ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
bpp, &newb);
if (error)
return (error);
if (DOINGSOFTDEP(vp))
softdep_setup_allocdirect(ip, lbn,
newb, nb, nsize, osize,
bpp ? *bpp : NULL);
}
} else {
/*
@ -580,10 +544,6 @@ ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
clrbuf(bp);
*bpp = bp;
}
if (DOINGSOFTDEP(vp))
softdep_setup_allocdirect(ip, lbn, newb, 0,
nsize, 0, bpp ? *bpp : NULL);
}
ip->i_ffs2_db[lbn] = newb;
@ -626,19 +586,13 @@ ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
bp->b_blkno = fsbtodb(fs, nb);
clrbuf(bp);
if (DOINGSOFTDEP(vp)) {
softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
newb, 0, fs->fs_bsize, 0, bp);
bdwrite(bp);
} else {
/*
* Write synchronously so that indirect blocks never
* point at garbage.
*/
error = bwrite(bp);
if (error)
goto fail;
}
/*
* Write synchronously so that indirect blocks never
* point at garbage.
*/
error = bwrite(bp);
if (error)
goto fail;
unwindidx = 0;
allocib = &ip->i_ffs2_ib[indirs[0].in_off];
@ -685,20 +639,14 @@ ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
nbp->b_blkno = fsbtodb(fs, nb);
clrbuf(nbp);
if (DOINGSOFTDEP(vp)) {
softdep_setup_allocindir_meta(nbp, ip, bp,
indirs[i - 1].in_off, nb);
bdwrite(nbp);
} else {
/*
* Write synchronously so that indirect blocks never
* point at garbage.
*/
error = bwrite(nbp);
if (error) {
brelse(bp);
goto fail;
}
/*
* Write synchronously so that indirect blocks never
* point at garbage.
*/
error = bwrite(nbp);
if (error) {
brelse(bp);
goto fail;
}
if (unwindidx < 0)
@ -740,10 +688,6 @@ ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
*bpp = nbp;
}
if (DOINGSOFTDEP(vp))
softdep_setup_allocindir_page(ip, lbn, bp,
indirs[num].in_off, nb, 0, bpp ? *bpp : NULL);
bap[indirs[num].in_off] = nb;
if (allocib == NULL && unwindidx < 0)
@ -830,11 +774,6 @@ fail:
}
}
if (DOINGSOFTDEP(vp) && unwindidx == 0) {
ip->i_flag |= IN_CHANGE | IN_UPDATE;
ffs_update(ip, 1);
}
/*
* Now that any dependencies that we created have been
* resolved, we can undo the partial allocation.
@ -842,8 +781,6 @@ fail:
if (unwindidx == 0) {
*allocib = 0;
ip->i_flag |= IN_CHANGE | IN_UPDATE;
if (DOINGSOFTDEP(vp))
ffs_update(ip, 1);
} else {
r = bread(vp, indirs[unwindidx].in_lbn,
(int)fs->fs_bsize, &bp);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_extern.h,v 1.45 2020/01/20 23:21:56 claudio Exp $ */
/* $OpenBSD: ffs_extern.h,v 1.46 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ffs_extern.h,v 1.4 1996/02/09 22:22:22 christos Exp $ */
/*
@ -36,7 +36,6 @@
#define FFS_CLUSTERWRITE 2 /* cluster writing enabled */
#define FFS_REALLOCBLKS 3 /* block reallocation enabled */
#define FFS_ASYNCFREE 4 /* asynchronous block freeing enabled */
#define FFS_MAX_SOFTDEPS 5 /* maximum structs before slowdown */
#define FFS_SD_TICKDELAY 6 /* ticks to pause during slowdown */
#define FFS_SD_WORKLIST_PUSH 7 /* # of worklist cleanups */
#define FFS_SD_BLK_LIMIT_PUSH 8 /* # of times block limit neared */
@ -59,7 +58,6 @@
{ 0, 0 }, \
{ 0, 0 }, \
{ 0, 0 }, \
{ "max_softdeps", CTLTYPE_INT }, \
{ "sd_tickdelay", CTLTYPE_INT }, \
{ "sd_worklist_push", CTLTYPE_INT }, \
{ "sd_blk_limit_push", CTLTYPE_INT }, \
@ -167,28 +165,6 @@ int ffsfifo_reclaim(void *);
struct vop_vfree_args;
struct vop_fsync_args;
void softdep_initialize(void);
int softdep_process_worklist(struct mount *);
int softdep_mount(struct vnode *, struct mount *, struct fs *,
struct ucred *);
int softdep_flushworklist(struct mount *, int *, struct proc *);
int softdep_flushfiles(struct mount *, int, struct proc *);
void softdep_update_inodeblock(struct inode *, struct buf *, int);
void softdep_load_inodeblock(struct inode *);
void softdep_freefile(struct vnode *, ufsino_t, mode_t);
void softdep_setup_freeblocks(struct inode *, off_t);
void softdep_setup_inomapdep(struct buf *, struct inode *, ufsino_t);
void softdep_setup_blkmapdep(struct buf *, struct fs *, daddr_t);
void softdep_setup_allocdirect(struct inode *, daddr_t, daddr_t,
daddr_t, long, long, struct buf *);
void softdep_setup_allocindir_meta(struct buf *, struct inode *,
struct buf *, int, daddr_t);
void softdep_setup_allocindir_page(struct inode *, daddr_t,
struct buf *, int, daddr_t, daddr_t, struct buf *);
void softdep_fsync_mountdev(struct vnode *, int);
int softdep_sync_metadata(struct vop_fsync_args *);
int softdep_fsync(struct vnode *);
extern struct pool ffs_ino_pool; /* memory pool for inodes */
extern struct pool ffs_dinode1_pool; /* memory pool for UFS1 dinodes */
#ifdef FFS2

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_inode.c,v 1.82 2024/01/09 03:15:59 guenther Exp $ */
/* $OpenBSD: ffs_inode.c,v 1.83 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ffs_inode.c,v 1.10 1996/05/11 18:27:19 mycroft Exp $ */
/*
@ -95,9 +95,7 @@ ffs_update(struct inode *ip, int waitfor)
return (error);
}
if (DOINGSOFTDEP(vp))
softdep_update_inodeblock(ip, bp, waitfor);
else if (ip->i_effnlink != DIP(ip, nlink))
if (ip->i_effnlink != DIP(ip, nlink))
panic("ffs_update: bad link cnt");
#ifdef FFS2
@ -174,30 +172,6 @@ ffs_truncate(struct inode *oip, off_t length, int flags, struct ucred *cred)
oip->i_ci.ci_lasta = oip->i_ci.ci_clen
= oip->i_ci.ci_cstart = oip->i_ci.ci_lastw = 0;
if (DOINGSOFTDEP(ovp)) {
if (length > 0 || softdep_slowdown(ovp)) {
/*
* If a file is only partially truncated, then
* we have to clean up the data structures
* describing the allocation past the truncation
* point. Finding and deallocating those structures
* is a lot of work. Since partial truncation occurs
* rarely, we solve the problem by syncing the file
* so that it will have no data structures left.
*/
if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
curproc)) != 0)
return (error);
} else {
(void)ufs_quota_free_blocks(oip, DIP(oip, blocks),
NOCRED);
softdep_setup_freeblocks(oip, length);
vinvalbuf(ovp, 0, cred, curproc, 0, INFSLP);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (UFS_UPDATE(oip, 0));
}
}
osize = DIP(oip, size);
/*
* Lengthen the size of the file. We must ensure that the
@ -244,18 +218,6 @@ ffs_truncate(struct inode *oip, off_t length, int flags, struct ucred *cred)
cred, aflags, &bp);
if (error)
return (error);
/*
* When we are doing soft updates and the UFS_BALLOC
* above fills in a direct block hole with a full sized
* block that will be truncated down to a fragment below,
* we must flush out the block dependency with an FSYNC
* so that we do not get a soft updates inconsistency
* when we create the fragment below.
*/
if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
(error = VOP_FSYNC(ovp, cred, MNT_WAIT, curproc)) != 0)
return (error);
DIP_ASSIGN(oip, size, length);
size = blksize(fs, oip, lbn);
(void) uvm_vnp_uncache(ovp);

File diff suppressed because it is too large

View file

@ -1,193 +0,0 @@
/* $OpenBSD: ffs_softdep_stub.c,v 1.18 2013/06/11 16:42:18 deraadt Exp $ */
/*
* Copyright 1998 Marshall Kirk McKusick. All Rights Reserved.
*
* The soft updates code is derived from the appendix of a University
* of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
* "Soft Updates: A Solution to the Metadata Update Problem in File
* Systems", CSE-TR-254-95, August 1995).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. None of the names of McKusick, Ganger, or the University of Michigan
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)ffs_softdep_stub.c 9.1 (McKusick) 7/10/97
* $FreeBSD: src/sys/ufs/ffs/ffs_softdep_stub.c,v 1.14 2000/08/09 00:41:54 tegge Exp $
*/
#ifndef FFS_SOFTUPDATES
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/systm.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>
int
softdep_flushfiles(struct mount *oldmnt, int flags, struct proc *p)
{
panic("softdep_flushfiles called");
}
int
softdep_mount(struct vnode *devvp, struct mount *mp, struct fs *fs,
struct ucred *cred)
{
return (0);
}
void
softdep_initialize(void)
{
return;
}
#ifndef __OPTIMIZE__
void
softdep_setup_inomapdep(struct buf *bp, struct inode *ip, ufsino_t newinum)
{
panic("softdep_setup_inomapdep called");
}
void
softdep_setup_blkmapdep(struct buf *bp, struct fs *fs, daddr_t newblkno)
{
panic("softdep_setup_blkmapdep called");
}
void
softdep_setup_allocdirect(struct inode *ip, daddr_t lbn, daddr_t newblkno,
daddr_t oldblkno, long newsize, long oldsize, struct buf *bp)
{
panic("softdep_setup_allocdirect called");
}
void
softdep_setup_allocindir_page(struct inode *ip, daddr_t lbn, struct buf *bp,
int ptrno, daddr_t newblkno, daddr_t oldblkno, struct buf *nbp)
{
panic("softdep_setup_allocindir_page called");
}
void
softdep_setup_allocindir_meta(struct buf *nbp, struct inode *ip,
struct buf *bp, int ptrno, daddr_t newblkno)
{
panic("softdep_setup_allocindir_meta called");
}
void
softdep_setup_freeblocks(struct inode *ip, off_t length)
{
panic("softdep_setup_freeblocks called");
}
void
softdep_freefile(struct vnode *pvp, ufsino_t ino, mode_t mode)
{
panic("softdep_freefile called");
}
int
softdep_setup_directory_add(struct buf *bp, struct inode *dp, off_t diroffset,
long newinum, struct buf *newdirbp, int isnewblk)
{
panic("softdep_setup_directory_add called");
return (0);
}
void
softdep_change_directoryentry_offset(struct inode *dp, caddr_t base,
caddr_t oldloc, caddr_t newloc, int entrysize)
{
panic("softdep_change_directoryentry_offset called");
}
void
softdep_setup_remove(struct buf *bp, struct inode *dp, struct inode *ip,
int isrmdir)
{
panic("softdep_setup_remove called");
}
void
softdep_setup_directory_change(struct buf *bp, struct inode *dp,
struct inode *ip, long newinum, int isrmdir)
{
panic("softdep_setup_directory_change called");
}
void
softdep_change_linkcnt(struct inode *ip, int nodelay)
{
panic("softdep_change_linkcnt called");
}
void
softdep_load_inodeblock(struct inode *ip)
{
panic("softdep_load_inodeblock called");
}
void
softdep_update_inodeblock(struct inode *ip, struct buf *bp, int waitfor)
{
panic("softdep_update_inodeblock called");
}
#endif
void
softdep_fsync_mountdev(struct vnode *vp, int waitfor)
{
return;
}
int
softdep_flushworklist(struct mount *oldmnt, int *countp, struct proc *p)
{
*countp = 0;
return (0);
}
int
softdep_sync_metadata(struct vop_fsync_args *ap)
{
return (0);
}
#ifndef __OPTIMIZE__
int
softdep_slowdown(struct vnode *vp)
{
panic("softdep_slowdown called");
}
#endif
#endif /* !FFS_SOFTUPDATES */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_vfsops.c,v 1.197 2024/01/19 18:58:17 deraadt Exp $ */
/* $OpenBSD: ffs_vfsops.c,v 1.198 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ffs_vfsops.c,v 1.19 1996/02/09 22:22:26 christos Exp $ */
/*
@ -213,20 +213,6 @@ ffs_mount(struct mount *mp, const char *path, void *data,
int error = 0, flags;
int ronly;
/* Ask not for whom the bell tolls */
if (mp->mnt_flag & MNT_SOFTDEP) {
mp->mnt_flag &= ~MNT_SOFTDEP;
}
/*
* Soft updates is incompatible with "async",
* so if we are doing softupdates stop the user
* from setting the async flag.
*/
if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
(MNT_SOFTDEP | MNT_ASYNC)) {
return (EINVAL);
}
/*
* If updating, check whether changing from read-only to
* read/write; if there is no device name, that's all we do.
@ -238,16 +224,6 @@ ffs_mount(struct mount *mp, const char *path, void *data,
error = 0;
ronly = fs->fs_ronly;
/*
* Soft updates won't be set if read/write,
* so "async" will be illegal.
*/
if (ronly == 0 && (mp->mnt_flag & MNT_ASYNC) &&
(fs->fs_flags & FS_DOSOFTDEP)) {
error = EINVAL;
goto error_1;
}
if (ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
/* Flush any dirty data */
VFS_SYNC(mp, MNT_WAIT, 0, p->p_ucred, p);
@ -260,46 +236,11 @@ ffs_mount(struct mount *mp, const char *path, void *data,
flags |= IGNORECLEAN;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
if (fs->fs_flags & FS_DOSOFTDEP) {
error = softdep_flushfiles(mp, flags, p);
mp->mnt_flag &= ~MNT_SOFTDEP;
} else
error = ffs_flushfiles(mp, flags, p);
error = ffs_flushfiles(mp, flags, p);
mp->mnt_flag |= MNT_RDONLY;
ronly = 1;
}
/*
* Flush soft dependencies if disabling it via an update
* mount. This may leave some items to be processed,
* so don't do this yet XXX.
*/
if ((fs->fs_flags & FS_DOSOFTDEP) &&
!(mp->mnt_flag & MNT_SOFTDEP) &&
!(mp->mnt_flag & MNT_RDONLY) && fs->fs_ronly == 0) {
#if 0
flags = WRITECLOSE;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
error = softdep_flushfiles(mp, flags, p);
#endif
}
/*
* When upgrading to a softdep mount, we must first flush
* all vnodes. (not done yet -- see above)
*/
if (!(fs->fs_flags & FS_DOSOFTDEP) &&
(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#if 0
flags = WRITECLOSE;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
error = ffs_flushfiles(mp, flags, p);
#else
mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
}
if (!error && (mp->mnt_flag & MNT_RELOAD))
error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
if (error)
@ -307,19 +248,6 @@ ffs_mount(struct mount *mp, const char *path, void *data,
if (ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
if (fs->fs_clean == 0) {
#if 0
/*
* It is safe to mount an unclean file system
* if it was previously mounted with softdep
* but we may lose space and must
* sometimes run fsck manually.
*/
if (fs->fs_flags & FS_DOSOFTDEP)
printf(
"WARNING: %s was not properly unmounted\n",
fs->fs_fsmnt);
else
#endif
if (mp->mnt_flag & MNT_FORCE) {
printf(
"WARNING: %s was not properly unmounted\n",
@ -333,12 +261,6 @@ ffs_mount(struct mount *mp, const char *path, void *data,
}
}
if ((fs->fs_flags & FS_DOSOFTDEP)) {
error = softdep_mount(devvp, mp, fs,
p->p_ucred);
if (error)
goto error_1;
}
fs->fs_contigdirs = malloc((u_long)fs->fs_ncg,
M_UFSMNT, M_WAITOK|M_ZERO);
@ -454,9 +376,6 @@ success:
if (ronly)
free(fs->fs_contigdirs, M_UFSMNT, fs->fs_ncg);
}
if (!ronly) {
fs->fs_flags &= ~FS_DOSOFTDEP;
}
ffs_sbupdate(ump, MNT_WAIT);
#if 0
if (ronly) {
@ -627,8 +546,6 @@ ffs_reload(struct mount *mountp, struct ucred *cred, struct proc *p)
space += size;
brelse(bp);
}
if ((fs->fs_flags & FS_DOSOFTDEP))
(void) softdep_mount(devvp, mountp, fs, cred);
/*
* We no longer know anything about clusters per cylinder group.
*/
@ -767,19 +684,6 @@ ffs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
fs->fs_fmod = 0;
fs->fs_flags &= ~FS_UNCLEAN;
if (fs->fs_clean == 0) {
#if 0
/*
* It is safe to mount an unclean file system
* if it was previously mounted with softdep
* but we may lose space and must
* sometimes run fsck manually.
*/
if (fs->fs_flags & FS_DOSOFTDEP)
printf(
"WARNING: %s was not properly unmounted\n",
fs->fs_fsmnt);
else
#endif
if (ronly || (mp->mnt_flag & MNT_FORCE)) {
printf(
"WARNING: %s was not properly unmounted\n",
@ -908,15 +812,8 @@ ffs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
if (fs->fs_maxfilesize > maxfilesize) /* XXX */
fs->fs_maxfilesize = maxfilesize; /* XXX */
if (ronly == 0) {
if ((fs->fs_flags & FS_DOSOFTDEP) &&
(error = softdep_mount(devvp, mp, fs, cred)) != 0) {
free(fs->fs_csp, M_UFSMNT, 0);
free(fs->fs_contigdirs, M_UFSMNT, fs->fs_ncg);
goto out;
}
fs->fs_fmod = 1;
fs->fs_clean = 0;
fs->fs_flags &= ~FS_DOSOFTDEP;
error = ffs_sbupdate(ump, MNT_WAIT);
if (error == EROFS)
goto out;
@ -1028,10 +925,7 @@ ffs_unmount(struct mount *mp, int mntflags, struct proc *p)
ump = VFSTOUFS(mp);
fs = ump->um_fs;
if (mp->mnt_flag & MNT_SOFTDEP)
error = softdep_flushfiles(mp, flags, p);
else
error = ffs_flushfiles(mp, flags, p);
error = ffs_flushfiles(mp, flags, p);
if (error != 0)
return (error);
@ -1206,7 +1100,7 @@ ffs_sync(struct mount *mp, int waitfor, int stall, struct ucred *cred, struct pr
{
struct ufsmount *ump = VFSTOUFS(mp);
struct fs *fs;
int error, allerror = 0, count, clean, fmod;
int error, allerror = 0, clean, fmod;
struct ffs_sync_args fsa;
fs = ump->um_fs;
@ -1219,7 +1113,7 @@ ffs_sync(struct mount *mp, int waitfor, int stall, struct ucred *cred, struct pr
printf("fs = %s\n", fs->fs_fsmnt);
panic("update: rofs mod");
}
loop:
/*
* Write back each (modified) inode.
*/
@ -1241,13 +1135,6 @@ ffs_sync(struct mount *mp, int waitfor, int stall, struct ucred *cred, struct pr
/*
* Force stale file system control information to be flushed.
*/
if ((ump->um_mountp->mnt_flag & MNT_SOFTDEP) && waitfor == MNT_WAIT) {
if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
allerror = error;
/* Flushed work items may create new vnodes to clean */
if (count)
goto loop;
}
if (waitfor != MNT_LAZY) {
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0)
@ -1387,10 +1274,7 @@ retry:
brelse(bp);
if (DOINGSOFTDEP(vp))
softdep_load_inodeblock(ip);
else
ip->i_effnlink = DIP(ip, nlink);
ip->i_effnlink = DIP(ip, nlink);
/*
* Initialize the vnode from the inode, check for aliases.
@ -1556,32 +1440,10 @@ ffs_init(struct vfsconf *vfsp)
PR_WAITOK, "dino2pl", NULL);
#endif
softdep_initialize();
return (ufs_init(vfsp));
}
#ifdef FFS_SOFTUPDATES
extern int max_softdeps, tickdelay, stat_worklist_push;
extern int stat_blk_limit_push, stat_ino_limit_push, stat_blk_limit_hit;
extern int stat_ino_limit_hit, stat_sync_limit_hit, stat_indir_blk_ptrs;
extern int stat_inode_bitmap, stat_direct_blk_ptrs, stat_dir_entry;
#endif
const struct sysctl_bounded_args ffs_vars[] = {
#ifdef FFS_SOFTUPDATES
{ FFS_MAX_SOFTDEPS, &max_softdeps, 0, INT_MAX },
{ FFS_SD_TICKDELAY, &tickdelay, 2, INT_MAX },
{ FFS_SD_WORKLIST_PUSH, &stat_worklist_push, SYSCTL_INT_READONLY },
{ FFS_SD_BLK_LIMIT_PUSH, &stat_blk_limit_push, SYSCTL_INT_READONLY },
{ FFS_SD_INO_LIMIT_PUSH, &stat_ino_limit_push, SYSCTL_INT_READONLY },
{ FFS_SD_BLK_LIMIT_HIT, &stat_blk_limit_hit, SYSCTL_INT_READONLY },
{ FFS_SD_INO_LIMIT_HIT, &stat_ino_limit_hit, SYSCTL_INT_READONLY },
{ FFS_SD_SYNC_LIMIT_HIT, &stat_sync_limit_hit, SYSCTL_INT_READONLY },
{ FFS_SD_INDIR_BLK_PTRS, &stat_indir_blk_ptrs, SYSCTL_INT_READONLY },
{ FFS_SD_INODE_BITMAP, &stat_inode_bitmap, SYSCTL_INT_READONLY },
{ FFS_SD_DIRECT_BLK_PTRS, &stat_direct_blk_ptrs, SYSCTL_INT_READONLY },
{ FFS_SD_DIR_ENTRY, &stat_dir_entry, SYSCTL_INT_READONLY },
#endif
#ifdef UFS_DIRHASH
{ FFS_DIRHASH_DIRSIZE, &ufs_mindirhashsize, 0, INT_MAX },
{ FFS_DIRHASH_MAXMEM, &ufs_dirhashmaxmem, 0, INT_MAX },

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_vnops.c,v 1.101 2024/01/09 03:16:00 guenther Exp $ */
/* $OpenBSD: ffs_vnops.c,v 1.102 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ffs_vnops.c,v 1.7 1996/05/11 18:27:24 mycroft Exp $ */
/*
@ -420,11 +420,6 @@ ffs_fsync(void *v)
struct buf *bp, *nbp;
int s, error, passes, skipmeta;
if (vp->v_type == VBLK &&
vp->v_specmountpoint != NULL &&
(vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP))
softdep_fsync_mountdev(vp, ap->a_waitfor);
/*
* Flush all dirty buffers associated with a vnode.
*/
@ -452,13 +447,6 @@ loop:
panic("ffs_fsync: not dirty");
if (skipmeta && bp->b_lblkno < 0)
continue;
if (ap->a_waitfor != MNT_WAIT &&
LIST_FIRST(&bp->b_dep) != NULL &&
(bp->b_flags & B_DEFERRED) == 0 &&
buf_countdeps(bp, 0, 1)) {
bp->b_flags |= B_DEFERRED;
continue;
}
bremfree(bp);
buf_acquire(bp);
@ -492,8 +480,7 @@ loop:
* with the vnode has been written.
*/
splx(s);
if ((error = softdep_sync_metadata(ap)) != 0)
return (error);
/* XXX softdep was here. reconsider this locking dance */
s = splbio();
if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
/*

View file

@ -1,4 +1,4 @@
/* $OpenBSD: fs.h,v 1.44 2022/01/11 03:13:59 jsg Exp $ */
/* $OpenBSD: fs.h,v 1.45 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: fs.h,v 1.6 1995/04/12 21:21:02 mycroft Exp $ */
/*
@ -328,7 +328,6 @@ struct fs {
* Filesystem flags.
*/
#define FS_UNCLEAN 0x01 /* filesystem not clean at mount */
#define FS_DOSOFTDEP 0x02 /* filesystem using soft dependencies */
/*
* The following flag is used to detect a FFS1 file system that had its flags
* moved to the new (FFS2) location for compatibility.

View file

@ -1,590 +0,0 @@
/* $OpenBSD: softdep.h,v 1.18 2018/04/01 12:02:00 dhill Exp $ */
/*
* Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
*
* The soft updates code is derived from the appendix of a University
* of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
* "Soft Updates: A Solution to the Metadata Update Problem in File
* Systems", CSE-TR-254-95, August 1995).
*
* Further information about soft updates can be obtained from:
*
* Marshall Kirk McKusick http://www.mckusick.com/softdep/
* 1614 Oxford Street mckusick@mckusick.com
* Berkeley, CA 94709-1608 +1-510-843-9542
* USA
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)softdep.h 9.7 (McKusick) 6/21/00
* $FreeBSD: src/sys/ufs/ffs/softdep.h,v 1.10 2000/06/22 00:29:53 mckusick Exp $
*/
#include <sys/queue.h>
/*
* Allocation dependencies are handled with undo/redo on the in-memory
* copy of the data. A particular data dependency is eliminated when
* it is ALLCOMPLETE: that is ATTACHED, DEPCOMPLETE, and COMPLETE.
*
* ATTACHED means that the data is not currently being written to
* disk. UNDONE means that the data has been rolled back to a safe
* state for writing to the disk. When the I/O completes, the data is
* restored to its current form and the state reverts to ATTACHED.
* The data must be locked throughout the rollback, I/O, and roll
* forward so that the rolled back information is never visible to
* user processes. The COMPLETE flag indicates that the item has been
* written. For example, a dependency that requires that an inode be
* written will be marked COMPLETE after the inode has been written
* to disk. The DEPCOMPLETE flag indicates the completion of any other
* dependencies such as the writing of a cylinder group map has been
* completed. A dependency structure may be freed only when both it
* and its dependencies have completed and any rollbacks that are in
* progress have finished as indicated by the set of ALLCOMPLETE flags
* all being set. The two MKDIR flags indicate additional dependencies
* that must be done when creating a new directory. MKDIR_BODY is
* cleared when the directory data block containing the "." and ".."
* entries has been written. MKDIR_PARENT is cleared when the parent
* inode with the increased link count for ".." has been written. When
* both MKDIR flags have been cleared, the DEPCOMPLETE flag is set to
* indicate that the directory dependencies have been completed. The
* writing of the directory inode itself sets the COMPLETE flag which
* then allows the directory entry for the new directory to be written
* to disk. The RMDIR flag marks a dirrem structure as representing
* the removal of a directory rather than a file. When the removal
* dependencies are completed, additional work needs to be done
* (truncation of the "." and ".." entries, an additional decrement
* of the associated inode, and a decrement of the parent inode). The
* DIRCHG flag marks a diradd structure as representing the changing
* of an existing entry rather than the addition of a new one. When
* the update is complete the dirrem associated with the inode for
* the old name must be added to the worklist to do the necessary
* reference count decrement. The GOINGAWAY flag indicates that the
* data structure is frozen from further change until its dependencies
* have been completed and its resources freed after which it will be
* discarded. The IOSTARTED flag prevents multiple calls to the I/O
* start routine from doing multiple rollbacks. The SPACECOUNTED flag
* says that the file's space has been accounted to the pending free
* space count. The NEWBLOCK flag marks pagedep structures that have
* just been allocated, so must be claimed by the inode before all
* dependencies are complete. The ONWORKLIST flag shows whether the
* structure is currently linked onto a worklist.
*
*/
#define ATTACHED 0x0001
#define UNDONE 0x0002
#define COMPLETE 0x0004
#define DEPCOMPLETE 0x0008
#define MKDIR_PARENT 0x0010 /* diradd & mkdir only */
#define MKDIR_BODY 0x0020 /* diradd & mkdir only */
#define RMDIR 0x0040 /* dirrem only */
#define DIRCHG 0x0080 /* diradd & dirrem only */
#define GOINGAWAY 0x0100 /* indirdep only */
#define IOSTARTED 0x0200 /* inodedep & pagedep only */
#define SPACECOUNTED 0x0400 /* inodedep only */
#define NEWBLOCK 0x0800 /* pagedep only */
#define UFS1FMT 0x2000 /* indirdep only */
#define ONWORKLIST 0x8000
#define ALLCOMPLETE (ATTACHED | COMPLETE | DEPCOMPLETE)
#define DEP_BITS "\020\01ATTACHED\02UNDONE\03COMPLETE\04DEPCOMPLETE" \
"\05MKDIR_PARENT\06MKDIR_BODY\07RMDIR\010DIRCHG\011GOINGAWAY" \
"\012IOSTARTED\013SPACECOUNTED\014NEWBLOCK\016UFS1FMT\020ONWORKLIST"
/*
* The workitem queue.
*
* It is sometimes useful and/or necessary to clean up certain dependencies
* in the background rather than during execution of an application process
* or interrupt service routine. To realize this, we append dependency
* structures corresponding to such tasks to a "workitem" queue. In a soft
* updates implementation, most pending workitems should not wait for more
* than a couple of seconds, so the filesystem syncer process awakens once
* per second to process the items on the queue.
*/
/* LIST_HEAD(workhead, worklist); -- declared in buf.h */
/*
* Each request can be linked onto a work queue through its worklist structure.
* To avoid the need for a pointer to the structure itself, this structure
* MUST be declared FIRST in each type in which it appears! If more than one
* worklist is needed in the structure, then a wk_data field must be added
* and the macros below changed to use it.
*/
struct worklist {
LIST_ENTRY(worklist) wk_list; /* list of work requests */
unsigned short wk_type; /* type of request */
unsigned short wk_state; /* state flags */
};
#define WK_DATA(wk) ((void *)(wk))
#define WK_PAGEDEP(wk) ((struct pagedep *)(wk))
#define WK_INODEDEP(wk) ((struct inodedep *)(wk))
#define WK_NEWBLK(wk) ((struct newblk *)(wk))
#define WK_BMSAFEMAP(wk) ((struct bmsafemap *)(wk))
#define WK_ALLOCDIRECT(wk) ((struct allocdirect *)(wk))
#define WK_INDIRDEP(wk) ((struct indirdep *)(wk))
#define WK_ALLOCINDIR(wk) ((struct allocindir *)(wk))
#define WK_FREEFRAG(wk) ((struct freefrag *)(wk))
#define WK_FREEBLKS(wk) ((struct freeblks *)(wk))
#define WK_FREEFILE(wk) ((struct freefile *)(wk))
#define WK_DIRADD(wk) ((struct diradd *)(wk))
#define WK_MKDIR(wk) ((struct mkdir *)(wk))
#define WK_DIRREM(wk) ((struct dirrem *)(wk))
#define WK_NEWDIRBLK(wk) ((struct newdirblk *)(wk))
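These casts are only valid because of the "MUST be declared FIRST" rule above: the embedded worklist and its containing structure share an address. Illustrative sketch (struct pagedep is defined below):

/* &pd.pd_list and &pd are the same address, so the cast macro
 * recovers the containing pagedep losslessly. */
struct pagedep pd;
struct worklist *wk = &pd.pd_list;
struct pagedep *back = WK_PAGEDEP(wk);	/* back == &pd */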
/*
* Various types of lists
*/
LIST_HEAD(dirremhd, dirrem);
LIST_HEAD(diraddhd, diradd);
LIST_HEAD(newblkhd, newblk);
LIST_HEAD(inodedephd, inodedep);
LIST_HEAD(allocindirhd, allocindir);
LIST_HEAD(allocdirecthd, allocdirect);
TAILQ_HEAD(allocdirectlst, allocdirect);
/*
* The "pagedep" structure tracks the various dependencies related to
* a particular directory page. If a directory page has any dependencies,
* it will have a pagedep linked to its associated buffer. The
* pd_dirremhd list holds the list of dirrem requests which decrement
* inode reference counts. These requests are processed after the
* directory page with the corresponding zero'ed entries has been
* written. The pd_diraddhd list maintains the list of diradd requests
* which cannot be committed until their corresponding inode has been
* written to disk. Because a directory may have many new entries
* being created, several lists are maintained hashed on bits of the
* offset of the entry into the directory page to keep the lists from
* getting too long. Once a new directory entry has been cleared to
* be written, it is moved to the pd_pendinghd list. After the new
* entry has been written to disk it is removed from the pd_pendinghd
* list, any removed operations are done, and the dependency structure
* is freed.
*/
#define DAHASHSZ 6
#define DIRADDHASH(offset) (((offset) >> 2) % DAHASHSZ)
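A worked example of the hash (not from the source): an entry at page offset 520 hashes to pd_diraddhd[4].

/* DIRADDHASH(520) == (520 >> 2) % DAHASHSZ == 130 % 6 == 4 */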
struct pagedep {
struct worklist pd_list; /* page buffer */
# define pd_state pd_list.wk_state /* check for multiple I/O starts */
LIST_ENTRY(pagedep) pd_hash; /* hashed lookup */
struct mount *pd_mnt; /* associated mount point */
ufsino_t pd_ino; /* associated file */
daddr_t pd_lbn; /* block within file */
struct dirremhd pd_dirremhd; /* dirrem's waiting for page */
struct diraddhd pd_diraddhd[DAHASHSZ]; /* diradd dir entry updates */
struct diraddhd pd_pendinghd; /* directory entries awaiting write */
};
/*
* The "inodedep" structure tracks the set of dependencies associated
* with an inode. One task that it must manage is delayed operations
* (i.e., work requests that must be held until the inodedep's associated
* inode has been written to disk). Getting an inode from its incore
* state to the disk requires two steps to be taken by the filesystem
* in this order: first the inode must be copied to its disk buffer by
* the VOP_UPDATE operation; second the inode's buffer must be written
* to disk. To ensure that both operations have happened in the required
* order, the inodedep maintains two lists. Delayed operations are
* placed on the id_inowait list. When the VOP_UPDATE is done, all
* operations on the id_inowait list are moved to the id_bufwait list.
* When the buffer is written, the items on the id_bufwait list can be
* safely moved to the work queue to be processed. A second task of the
* inodedep structure is to track the status of block allocation within
* the inode. Each block that is allocated is represented by an
* "allocdirect" structure (see below). It is linked onto the id_newinoupdt
* list until both its contents and its allocation in the cylinder
* group map have been written to disk. Once these dependencies have been
* satisfied, it is removed from the id_newinoupdt list and any followup
* actions such as releasing the previous block or fragment are placed
* on the id_inowait list. When an inode is updated (a VOP_UPDATE is
* done), the "inodedep" structure is linked onto the buffer through
* its worklist. Thus, it will be notified when the buffer is about
* to be written and when it is done. At the update time, all the
* elements on the id_newinoupdt list are moved to the id_inoupdt list
* since those changes are now relevant to the copy of the inode in the
* buffer. Also at update time, the tasks on the id_inowait list are
* moved to the id_bufwait list so that they will be executed when
* the updated inode has been written to disk. When the buffer containing
* the inode is written to disk, any updates listed on the id_inoupdt
* list are rolled back as they are not yet safe. Following the write,
* the changes are once again rolled forward and any actions on the
* id_bufwait list are processed (since those actions are now safe).
* The entries on the id_inoupdt and id_newinoupdt lists must be kept
* sorted by logical block number to speed the calculation of the size
* of the rolled back inode (see explanation in initiate_write_inodeblock).
* When a directory entry is created, it is represented by a diradd.
* The diradd is added to the id_inowait list as it cannot be safely
* written to disk until the inode that it represents is on disk. After
* the inode is written, the id_bufwait list is processed and the diradd
* entries are moved to the id_pendinghd list where they remain until
* the directory block containing the name has been written to disk.
* The purpose of keeping the entries on the id_pendinghd list is so that
* the softdep_fsync function can find and push the inode's directory
* name(s) as part of the fsync operation for that file.
*/
struct inodedep {
struct worklist id_list; /* buffer holding inode block */
# define id_state id_list.wk_state /* inode dependency state */
LIST_ENTRY(inodedep) id_hash; /* hashed lookup */
struct fs *id_fs; /* associated filesystem */
ufsino_t id_ino; /* dependent inode */
nlink_t id_nlinkdelta; /* saved effective link count */
union { /* Saved UFS1/UFS2 dinode contents */
struct ufs1_dinode *idu_savedino1;
struct ufs2_dinode *idu_savedino2;
} id_un;
size_t id_unsize; /* size of dinode contents union */
LIST_ENTRY(inodedep) id_deps; /* bmsafemap's list of inodedep's */
struct buf *id_buf; /* related bmsafemap (if pending) */
off_t id_savedsize; /* file size saved during rollback */
struct workhead id_pendinghd; /* entries awaiting directory write */
struct workhead id_bufwait; /* operations after inode written */
struct workhead id_inowait; /* operations waiting inode update */
struct allocdirectlst id_inoupdt; /* updates before inode written */
struct allocdirectlst id_newinoupdt; /* updates when inode written */
};
#define id_savedino1 id_un.idu_savedino1
#define id_savedino2 id_un.idu_savedino2
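/*
 * Minimal sketch (not code from this file) of the update-time hand-off
 * described above, assuming the worklist's LIST_ENTRY is named wk_list
 * as declared earlier in this header: once VOP_UPDATE has copied the
 * inode into its buffer, work that waited on the in-core inode may now
 * wait on the buffer write instead.
 */
static void
inodedep_update_handoff(struct inodedep *inodedep)
{
	struct worklist *wk;

	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		LIST_INSERT_HEAD(&inodedep->id_bufwait, wk, wk_list);
	}
	/*
	 * The real code also merges id_newinoupdt into id_inoupdt here,
	 * keeping both lists sorted by logical block number.
	 */
}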
/*
* A "newblk" structure is attached to a bmsafemap structure when a block
* or fragment is allocated from a cylinder group. Its state is set to
* DEPCOMPLETE when its cylinder group map is written. It is consumed by
* an associated allocdirect or allocindir allocation, which will attach
* itself to the bmsafemap structure if the newblk's DEPCOMPLETE flag
* is not set (i.e., its cylinder group map has not been written).
*/
struct newblk {
LIST_ENTRY(newblk) nb_hash; /* hashed lookup */
struct fs *nb_fs; /* associated filesystem */
daddr_t nb_newblkno; /* allocated block number */
int nb_state; /* state of bitmap dependency */
LIST_ENTRY(newblk) nb_deps; /* bmsafemap's list of newblk's */
struct bmsafemap *nb_bmsafemap; /* associated bmsafemap */
};
/*
* A "bmsafemap" structure maintains a list of dependency structures
* that depend on the update of a particular cylinder group map.
* It has lists for newblks, allocdirects, allocindirs, and inodedeps.
* It is attached to the buffer of a cylinder group block when any of
* these things are allocated from the cylinder group. It is freed
* after the cylinder group map is written and the state of its
* dependencies is updated with DEPCOMPLETE to indicate that it has
* been processed.
*/
struct bmsafemap {
struct worklist sm_list; /* cylgrp buffer */
struct buf *sm_buf; /* associated buffer */
struct allocdirecthd sm_allocdirecthd; /* allocdirect deps */
struct allocindirhd sm_allocindirhd; /* allocindir deps */
struct inodedephd sm_inodedephd; /* inodedep deps */
struct newblkhd sm_newblkhd; /* newblk deps */
};
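/*
 * Sketch only, with a hypothetical helper name: the pass described
 * above that runs once the cylinder group buffer has been written.
 * DEPCOMPLETE is the dependency flag defined earlier in this header,
 * and the dependent structures are defined below; the real handler
 * also unlinks and frees the satisfied structures.
 */
static void
bmsafemap_written(struct bmsafemap *sm)
{
	struct newblk *nb;
	struct allocdirect *adp;

	LIST_FOREACH(nb, &sm->sm_newblkhd, nb_deps)
		nb->nb_state |= DEPCOMPLETE;
	LIST_FOREACH(adp, &sm->sm_allocdirecthd, ad_deps)
		adp->ad_state |= DEPCOMPLETE;
	/* sm_allocindirhd and sm_inodedephd are walked the same way */
}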
/*
* An "allocdirect" structure is attached to an "inodedep" when a new block
* or fragment is allocated and pointed to by the inode described by
* "inodedep". The worklist is linked to the buffer that holds the block.
* When the block is first allocated, it is linked to the bmsafemap
* structure associated with the buffer holding the cylinder group map
* from which it was allocated. When the cylinder group map is written
* to disk, ad_state has the DEPCOMPLETE flag set. When the block itself
* is written, the COMPLETE flag is set. Once both the cylinder group map
* and the data itself have been written, it is safe to write the inode
* that claims the block. If there was a previous fragment that had been
* allocated before the file was increased in size, the old fragment may
* be freed once the inode claiming the new block is written to disk.
* This ad_fragfree request is attached to the id_inowait list of the
* associated inodedep (pointed to by ad_inodedep) for processing after
* the inode is written. When a block is allocated to a directory, an
* fsync of a file whose name is within that block must ensure not only
* that the block containing the file name has been written, but also
* that the on-disk inode references that block. When a new directory
* block is created, we allocate a newdirblk structure which is linked
* to the associated allocdirect (on its ad_newdirblk list). When the
* allocdirect has been satisfied, the newdirblk structure is moved to
* the inodedep id_bufwait list of its directory to await the inode
* being written. When the inode is written, the directory entries are
* fully committed and can be deleted from their pagedep->pd_pendinghd
* and inodedep->id_pendinghd lists.
*/
struct allocdirect {
struct worklist ad_list; /* buffer holding block */
# define ad_state ad_list.wk_state /* block pointer state */
TAILQ_ENTRY(allocdirect) ad_next; /* inodedep's list of allocdirect's */
daddr_t ad_lbn; /* block within file */
daddr_t ad_newblkno; /* new value of block pointer */
daddr_t ad_oldblkno; /* old value of block pointer */
long ad_newsize; /* size of new block */
long ad_oldsize; /* size of old block */
LIST_ENTRY(allocdirect) ad_deps; /* bmsafemap's list of allocdirect's */
struct buf *ad_buf; /* cylgrp buffer (if pending) */
struct inodedep *ad_inodedep; /* associated inodedep */
struct freefrag *ad_freefrag; /* fragment to be freed (if any) */
struct workhead ad_newdirblk; /* dir block to notify when written */
};
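/*
 * Illustrative predicate, not from this file: per the comment above,
 * the inode claiming a new block may be written only once both the
 * bitmap write (DEPCOMPLETE) and the data write (COMPLETE) have
 * finished.  Both flags are defined earlier in this header.
 */
static int
allocdirect_committed(struct allocdirect *adp)
{
	return ((adp->ad_state & (DEPCOMPLETE | COMPLETE)) ==
	    (DEPCOMPLETE | COMPLETE));
}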
/*
* A single "indirdep" structure manages all allocation dependencies for
* pointers in an indirect block. The up-to-date state of the indirect
* block is stored in ir_saveddata. The set of pointers that may be safely
* written to the disk is kept in the safe copy held by ir_savebp. The
* state field is used only to track whether the buffer is currently
* being written (in which case it is not safe to update the safe copy).
* Ir_deplisthd contains the list of allocindir structures, one for each
* block that needs to be written to disk. Once the block and its bitmap
* allocation have been written the safe copy can be updated to reflect
* the allocation and the allocindir structure freed. If ir_state
* indicates that an I/O on the indirect block is in progress when the
* safe copy is to be updated, the update is deferred by placing the
* allocindir on the ir_donehd list. When the I/O on the indirect block
* completes, the entries on the ir_donehd list are processed by updating
* their corresponding safe-copy pointers and then freeing the allocindir
* structure.
*/
struct indirdep {
struct worklist ir_list; /* buffer holding indirect block */
# define ir_state ir_list.wk_state /* indirect block pointer state */
caddr_t ir_saveddata; /* buffer cache contents */
struct buf *ir_savebp; /* buffer holding safe copy */
struct allocindirhd ir_donehd; /* done waiting to update safecopy */
struct allocindirhd ir_deplisthd; /* allocindir deps for this block */
};
/*
* An "allocindir" structure is attached to an "indirdep" when a new block
* is allocated and pointed to by the indirect block described by the
* "indirdep". The worklist is linked to the buffer that holds the new block.
* When the block is first allocated, it is linked to the bmsafemap
* structure associated with the buffer holding the cylinder group map
* from which it was allocated. When the cylinder group map is written
* to disk, ai_state has the DEPCOMPLETE flag set. When the block itself
* is written, the COMPLETE flag is set. Once both the cylinder group map
* and the data itself have been written, it is safe to write the entry in
* the indirect block that claims the block; the "allocindir" dependency
* can then be freed as it is no longer applicable.
*/
struct allocindir {
struct worklist ai_list; /* buffer holding indirect block */
# define ai_state ai_list.wk_state /* indirect block pointer state */
LIST_ENTRY(allocindir) ai_next; /* indirdep's list of allocindir's */
int ai_offset; /* pointer offset in indirect block */
daddr_t ai_newblkno; /* new block pointer value */
daddr_t ai_oldblkno; /* old block pointer value */
struct freefrag *ai_freefrag; /* block to be freed when complete */
struct indirdep *ai_indirdep; /* address of associated indirdep */
LIST_ENTRY(allocindir) ai_deps; /* bmsafemap's list of allocindir's */
struct buf *ai_buf; /* cylgrp buffer (if pending) */
};
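/*
 * Sketch of the deferred safe-copy update described in the indirdep
 * comment above.  The helper name is hypothetical, IOSTARTED is the
 * in-progress flag defined earlier in this header, and the pointer
 * width of the on-disk format is glossed over.
 */
static void
allocindir_update_safecopy(struct allocindir *aip)
{
	struct indirdep *indirdep = aip->ai_indirdep;

	if (indirdep->ir_state & IOSTARTED) {
		/* a write is in flight: park on ir_donehd until it is done */
		LIST_REMOVE(aip, ai_next);
		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
		return;
	}
	/* roll the new block pointer into the safe copy held by ir_savebp */
	((daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
	    aip->ai_newblkno;
}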
/*
* A "freefrag" structure is attached to an "inodedep" when a previously
* allocated fragment is replaced with a larger fragment, rather than extended.
* The "freefrag" structure is constructed and attached when the replacement
* block is first allocated. It is processed after the inode claiming the
* bigger block that replaces it has been written to disk. Note that the
* ff_state field is used to store the uid, so it may lose data. However,
* the uid is used only in printing an error message, so this is not
* critical.
* Keeping it in a short keeps the data structure down to 32 bytes.
*/
struct freefrag {
struct worklist ff_list; /* id_inowait or delayed worklist */
# define ff_state ff_list.wk_state /* owning user; should be uid_t */
struct vnode *ff_devvp; /* filesystem device vnode */
struct mount *ff_mnt; /* associated mount point */
daddr_t ff_blkno; /* fragment physical block number */
long ff_fragsize; /* size of fragment being deleted */
ufsino_t ff_inum; /* owning inode number */
};
/*
* A "freeblks" structure is attached to an "inodedep" when the
* corresponding file's length is reduced to zero. It records all
* the information needed to free the blocks of a file after its
* zero'ed inode has been written to disk.
*/
struct freeblks {
struct worklist fb_list; /* id_inowait or delayed worklist */
# define fb_state fb_list.wk_state /* inode and dirty block state */
ufsino_t fb_previousinum; /* inode of previous owner of blocks */
struct vnode *fb_devvp; /* filesystem device vnode */
struct mount *fb_mnt; /* associated mount point */
off_t fb_oldsize; /* previous file size */
off_t fb_newsize; /* new file size */
int fb_chkcnt; /* used to check cnt of blks released */
uid_t fb_uid; /* uid of previous owner of blocks */
daddr_t fb_dblks[NDADDR]; /* direct blk ptrs to deallocate */
daddr_t fb_iblks[NIADDR]; /* indirect blk ptrs to deallocate */
};
/*
* A "freefile" structure is attached to an inode when its
* link count is reduced to zero. It marks the inode as free in
* the cylinder group map after the zero'ed inode has been written
* to disk and any associated blocks and fragments have been freed.
*/
struct freefile {
struct worklist fx_list; /* id_inowait or delayed worklist */
mode_t fx_mode; /* mode of inode */
ufsino_t fx_oldinum; /* inum of the unlinked file */
struct vnode *fx_devvp; /* filesystem device vnode */
struct mount *fx_mnt; /* associated mount point */
};
/*
* A "diradd" structure is linked to an "inodedep" id_inowait list when a
* new directory entry is allocated that references the inode described
* by "inodedep". When the inode itself is written (either the initial
* allocation for new inodes or with the increased link count for
* existing inodes), the COMPLETE flag is set in da_state. If the entry
* is for a newly allocated inode, the "inodedep" structure is associated
* with a bmsafemap which prevents the inode from being written to disk
* until the cylinder group has been updated. Thus the da_state COMPLETE
* flag cannot be set until the inode bitmap dependency has been removed.
* When creating a new file, it is safe to write the directory entry that
* claims the inode once the referenced inode has been written. Since
* writing the inode clears the bitmap dependencies, the DEPCOMPLETE flag
* in the diradd can be set unconditionally when creating a file. When
* creating a directory, there are two additional dependencies described by
* mkdir structures (see their description below). When these dependencies
* are resolved the DEPCOMPLETE flag is set in the diradd structure.
* If there are multiple links created to the same inode, there will be
* a separate diradd structure created for each link. The diradd is
* linked onto the pd_diraddhd list of the pagedep for the directory
* page that contains the entry. When a directory page is written,
* the pd_diraddhd list is traversed to roll back any entries that are
* not yet ready to be written to disk. If a directory entry is being
* changed (by rename) rather than added, the DIRCHG flag is set and
* the da_previous entry points to the entry that will be "removed"
* once the new entry has been committed. During rollback, entries
* with da_previous are replaced with the previous inode number rather
* than zero.
*
* The overlaying of da_pagedep and da_previous is done to keep the
* structure down to 32 bytes in size on a 32-bit machine. If a
* da_previous entry is present, the pointer to its pagedep is available
* in the associated dirrem entry. If the DIRCHG flag is set, the
* da_previous entry is valid; if not set the da_pagedep entry is valid.
* The DIRCHG flag never changes; it is set when the structure is created
* if appropriate and is never cleared.
*/
struct diradd {
struct worklist da_list; /* id_inowait or id_pendinghd list */
# define da_state da_list.wk_state /* state of the new directory entry */
LIST_ENTRY(diradd) da_pdlist; /* pagedep holding directory block */
doff_t da_offset; /* offset of new dir entry in dir blk */
ufsino_t da_newinum; /* inode number for the new dir entry */
union {
struct dirrem *dau_previous; /* entry being replaced in dir change */
struct pagedep *dau_pagedep; /* pagedep dependency for addition */
} da_un;
};
#define da_previous da_un.dau_previous
#define da_pagedep da_un.dau_pagedep
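/*
 * Illustrative accessor (not from this file): which arm of da_un is
 * valid follows the DIRCHG flag exactly as described above.  DIRCHG
 * is defined earlier in this header; struct dirrem and its dm_pagedep
 * arm appear below.
 */
static struct pagedep *
diradd_pagedep(struct diradd *dap)
{
	if (dap->da_state & DIRCHG)
		return (dap->da_previous->dm_pagedep);
	return (dap->da_pagedep);
}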
/*
* Two "mkdir" structures are needed to track the additional dependencies
* associated with creating a new directory entry. Normally a directory
* addition can be committed as soon as the newly referenced inode has been
* written to disk with its increased link count. When a directory is
* created there are two additional dependencies: writing the directory
* data block containing the "." and ".." entries (MKDIR_BODY) and writing
* the parent inode with the increased link count for ".." (MKDIR_PARENT).
* These additional dependencies are tracked by two mkdir structures that
* reference the associated "diradd" structure. When they have completed,
* they set the DEPCOMPLETE flag on the diradd so that it knows that its
* extra dependencies have been completed. The md_state field is used only
* to identify which type of dependency the mkdir structure is tracking.
* It is not used in the mainline code for any purpose other than consistency
* checking. All the mkdir structures in the system are linked together on
* a list. This list is needed so that a diradd can find its associated
* mkdir structures and deallocate them if it is prematurely freed (as for
* example if a mkdir is immediately followed by a rmdir of the same directory).
* Here, the free of the diradd must traverse the list to find the associated
* mkdir structures that reference it. The deletion would be faster if the
* diradd structure were simply augmented to have two pointers that referenced
* the associated mkdir's. However, this would grow the diradd structure
* beyond its 32 bytes (by two more pointers) just to speed a very
* infrequent operation.
*/
struct mkdir {
struct worklist md_list; /* id_inowait or buffer holding dir */
# define md_state md_list.wk_state /* type: MKDIR_PARENT or MKDIR_BODY */
struct diradd *md_diradd; /* associated diradd */
struct buf *md_buf; /* MKDIR_BODY: buffer holding dir */
LIST_ENTRY(mkdir) md_mkdirs; /* list of all mkdirs */
};
LIST_HEAD(mkdirlist, mkdir) mkdirlisthd;
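/*
 * Sketch of the premature-free traversal described above, using a
 * hypothetical helper name; the real routine also has to clear the
 * dependency state it tears down.  A diradd freed early walks the
 * global list to find and unlink its mkdir structures.
 */
static void
diradd_free_mkdirs(struct diradd *dap)
{
	struct mkdir *mkdir, *nextmd;

	for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir != NULL;
	    mkdir = nextmd) {
		nextmd = LIST_NEXT(mkdir, md_mkdirs);
		if (mkdir->md_diradd == dap)
			LIST_REMOVE(mkdir, md_mkdirs);
	}
}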
/*
* A "dirrem" structure describes an operation to decrement the link
* count on an inode. The dirrem structure is attached to the pd_dirremhd
* list of the pagedep for the directory page that contains the entry.
* It is processed after the directory page with the deleted entry has
* been written to disk.
*
* The overlaying of dm_pagedep and dm_dirinum is done to keep the
* structure down to 32 bytes in size on a 32-bit machine. It works
* because they are never used concurrently.
*/
struct dirrem {
struct worklist dm_list; /* delayed worklist */
# define dm_state dm_list.wk_state /* state of the old directory entry */
LIST_ENTRY(dirrem) dm_next; /* pagedep's list of dirrem's */
struct mount *dm_mnt; /* associated mount point */
ufsino_t dm_oldinum; /* inum of the removed dir entry */
union {
struct pagedep *dmu_pagedep; /* pagedep dependency for remove */
ufsino_t dmu_dirinum; /* parent inode number (for rmdir) */
} dm_un;
};
#define dm_pagedep dm_un.dmu_pagedep
#define dm_dirinum dm_un.dmu_dirinum
/*
* A "newdirblk" structure tracks the progress of a newly allocated
* directory block from its creation until it is claimed by its on-disk
* inode. When a block is allocated to a directory, an fsync of a file
* whose name is within that block must ensure not only that the block
* containing the file name has been written, but also that the on-disk
* inode references that block. When a new directory block is created,
* we allocate a newdirblk structure which is linked to the associated
* allocdirect (on its ad_newdirblk list). When the allocdirect has been
* satisfied, the newdirblk structure is moved to the inodedep id_bufwait
* list of its directory to await the inode being written. When the inode
* is written, the directory entries are fully committed and can be
* deleted from their pagedep->pd_pendinghd and inodedep->id_pendinghd
* lists. Note that we could track directory blocks allocated to indirect
* blocks using a similar scheme with the allocindir structures. Rather
* than adding this level of complexity, we simply write those newly
* allocated indirect blocks synchronously as such allocations are rare.
*/
struct newdirblk {
struct worklist db_list; /* id_inowait or pg_newdirblk */
# define db_state db_list.wk_state /* unused */
struct pagedep *db_pagedep; /* associated pagedep */
};


@@ -1,4 +1,4 @@
/* $OpenBSD: inode.h,v 1.53 2020/06/24 22:03:45 cheloha Exp $ */
/* $OpenBSD: inode.h,v 1.54 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: inode.h,v 1.8 1995/06/15 23:22:50 cgd Exp $ */
/*
@@ -332,11 +332,6 @@ struct indir {
} while (0)
/* Determine if soft dependencies are being done */
#ifdef FFS_SOFTUPDATES
#define DOINGSOFTDEP(vp) ((vp)->v_mount->mnt_flag & MNT_SOFTDEP)
#else
#define DOINGSOFTDEP(vp) (0)
#endif
#define DOINGASYNC(vp) ((vp)->v_mount->mnt_flag & MNT_ASYNC)
/* This overlays the fid structure (see mount.h). */


@@ -1,4 +1,4 @@
/* $OpenBSD: ufs_extern.h,v 1.38 2022/06/26 05:20:43 visa Exp $ */
/* $OpenBSD: ufs_extern.h,v 1.39 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ufs_extern.h,v 1.5 1996/02/09 22:36:03 christos Exp $ */
/*-
@@ -132,18 +132,3 @@ int ufs_check_export(struct mount *, struct mbuf *, int *,
void ufs_itimes(struct vnode *);
int ufs_makeinode(int, struct vnode *, struct vnode **,
struct componentname *);
/*
* Soft dependency function prototypes.
*/
int softdep_setup_directory_add(struct buf *, struct inode *, off_t,
long, struct buf *, int);
void softdep_change_directoryentry_offset(struct inode *, caddr_t,
caddr_t, caddr_t, int);
void softdep_setup_remove(struct buf *,struct inode *, struct inode *,
int);
void softdep_setup_directory_change(struct buf *, struct inode *,
struct inode *, long, int);
void softdep_change_linkcnt(struct inode *, int);
int softdep_slowdown(struct vnode *);


@@ -1,4 +1,4 @@
/* $OpenBSD: ufs_inode.c,v 1.44 2020/02/27 09:10:31 mpi Exp $ */
/* $OpenBSD: ufs_inode.c,v 1.45 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ufs_inode.c,v 1.7 1996/05/11 18:27:52 mycroft Exp $ */
/*
@@ -88,21 +88,6 @@ ufs_inactive(void *v)
DIP_ASSIGN(ip, mode, 0);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
/*
* Setting the mode to zero needs to wait for the inode to be
* written just as does a change to the link count. So, rather
* than creating a new entry point to do the same thing, we
* just use softdep_change_linkcnt(). Also, we can't let
* softdep co-opt us to help on its worklist, as we may end up
* trying to recycle vnodes and getting to this same point a
* couple of times, blowing the kernel stack. However, this
* could be optimized by checking if we are coming from
* vrele(), vput() or vclean() (by checking for VXLOCK) and
* just avoiding the co-opt to happen in the last case.
*/
if (DOINGSOFTDEP(vp))
softdep_change_linkcnt(ip, 1);
UFS_INODE_FREE(ip, ip->i_number, mode);
}


@@ -1,4 +1,4 @@
/* $OpenBSD: ufs_lookup.c,v 1.60 2024/01/09 03:15:59 guenther Exp $ */
/* $OpenBSD: ufs_lookup.c,v 1.61 2024/02/03 18:51:58 beck Exp $ */
/* $NetBSD: ufs_lookup.c,v 1.7 1996/02/09 22:36:06 christos Exp $ */
/*
@@ -700,12 +700,9 @@ ufs_direnter(struct vnode *dvp, struct vnode *tvp, struct direct *dirp,
if (dp->i_offset & (DIRBLKSIZ - 1))
panic("ufs_direnter: newblk");
flags = B_CLRBUF;
if (!DOINGSOFTDEP(dvp))
flags |= B_SYNC;
flags |= B_SYNC;
if ((error = UFS_BUF_ALLOC(dp, (off_t)dp->i_offset, DIRBLKSIZ,
cr, flags, &bp)) != 0) {
if (DOINGSOFTDEP(dvp) && newdirbp != NULL)
bdwrite(newdirbp);
return (error);
}
DIP_ASSIGN(dp, size, dp->i_offset + DIRBLKSIZ);
@@ -725,45 +722,8 @@ ufs_direnter(struct vnode *dvp, struct vnode *tvp, struct direct *dirp,
}
#endif
if (DOINGSOFTDEP(dvp)) {
/*
* Ensure that the entire newly allocated block is a
* valid directory so that future growth within the
* block does not have to ensure that the block is
* written before the inode.
*/
blkoff += DIRBLKSIZ;
while (blkoff < bp->b_bcount) {
((struct direct *)
(bp->b_data + blkoff))->d_reclen = DIRBLKSIZ;
blkoff += DIRBLKSIZ;
}
if (softdep_setup_directory_add(bp, dp, dp->i_offset,
dirp->d_ino, newdirbp, 1) == 0) {
bdwrite(bp);
return (UFS_UPDATE(dp, 0));
}
/* We have just allocated a directory block in an
* indirect block. Rather than tracking when it gets
* claimed by the inode, we simply do a VOP_FSYNC
* now to ensure that it is there (in case the user
* does a future fsync). Note that we have to unlock
* the inode for the entry that we just entered, as
* the VOP_FSYNC may need to lock other inodes which
* can lead to deadlock if we also hold a lock on
* the newly entered node.
*/
if ((error = VOP_BWRITE(bp)))
return (error);
if (tvp != NULL)
VOP_UNLOCK(tvp);
error = VOP_FSYNC(dvp, p->p_ucred, MNT_WAIT, p);
if (tvp != NULL)
vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
return (error);
}
error = VOP_BWRITE(bp);
ret = UFS_UPDATE(dp, !DOINGSOFTDEP(dvp));
ret = UFS_UPDATE(dp, 1);
if (error == 0)
return (ret);
return (error);
@@ -792,8 +752,6 @@ ufs_direnter(struct vnode *dvp, struct vnode *tvp, struct direct *dirp,
*/
if ((error = UFS_BUFATOFF(dp, (off_t)dp->i_offset, &dirbuf, &bp))
!= 0) {
if (DOINGSOFTDEP(dvp) && newdirbp != NULL)
bdwrite(newdirbp);
return (error);
}
/*
@@ -836,11 +794,7 @@ ufs_direnter(struct vnode *dvp, struct vnode *tvp, struct direct *dirp,
dp->i_offset + ((char *)nep - dirbuf),
dp->i_offset + ((char *)ep - dirbuf));
#endif
if (DOINGSOFTDEP(dvp))
softdep_change_directoryentry_offset(dp, dirbuf,
(caddr_t)nep, (caddr_t)ep, dsize);
else
memmove(ep, nep, dsize);
memmove(ep, nep, dsize);
}
/*
* Here, `ep' points to a directory entry containing `dsize' in-use
@@ -876,14 +830,7 @@ ufs_direnter(struct vnode *dvp, struct vnode *tvp, struct direct *dirp,
dp->i_offset & ~(DIRBLKSIZ - 1));
#endif
if (DOINGSOFTDEP(dvp)) {
(void)softdep_setup_directory_add(bp, dp,
dp->i_offset + (caddr_t)ep - dirbuf,
dirp->d_ino, newdirbp, 0);
bdwrite(bp);
} else {
error = VOP_BWRITE(bp);
}
error = VOP_BWRITE(bp);
dp->i_flag |= IN_CHANGE | IN_UPDATE;
/*
@@ -960,30 +907,17 @@ ufs_dirremove(struct vnode *dvp, struct inode *ip, int flags, int isrmdir)
((dp->i_offset - dp->i_count) & (DIRBLKSIZ - 1)),
dp->i_offset & ~(DIRBLKSIZ - 1));
#endif
if (DOINGSOFTDEP(dvp)) {
if (ip) {
ip->i_effnlink--;
softdep_change_linkcnt(ip, 0);
softdep_setup_remove(bp, dp, ip, isrmdir);
}
if (softdep_slowdown(dvp)) {
error = bwrite(bp);
} else {
bdwrite(bp);
error = 0;
}
} else {
if (ip) {
ip->i_effnlink--;
DIP_ADD(ip, nlink, -1);
ip->i_flag |= IN_CHANGE;
}
if (DOINGASYNC(dvp) && dp->i_count != 0) {
bdwrite(bp);
error = 0;
} else
error = bwrite(bp);
if (ip) {
ip->i_effnlink--;
DIP_ADD(ip, nlink, -1);
ip->i_flag |= IN_CHANGE;
}
if (DOINGASYNC(dvp) && dp->i_count != 0) {
bdwrite(bp);
error = 0;
} else
error = bwrite(bp);
dp->i_flag |= IN_CHANGE | IN_UPDATE;
return (error);
}
@@ -1008,19 +942,13 @@ ufs_dirrewrite(struct inode *dp, struct inode *oip, ufsino_t newinum,
ep->d_ino = newinum;
ep->d_type = newtype;
oip->i_effnlink--;
if (DOINGSOFTDEP(vdp)) {
softdep_change_linkcnt(oip, 0);
softdep_setup_directory_change(bp, dp, oip, newinum, isrmdir);
DIP_ADD(oip, nlink, -1);
oip->i_flag |= IN_CHANGE;
if (DOINGASYNC(vdp)) {
bdwrite(bp);
error = 0;
} else {
DIP_ADD(oip, nlink, -1);
oip->i_flag |= IN_CHANGE;
if (DOINGASYNC(vdp)) {
bdwrite(bp);
error = 0;
} else {
error = VOP_BWRITE(bp);
}
error = VOP_BWRITE(bp);
}
dp->i_flag |= IN_CHANGE | IN_UPDATE;
return (error);


@@ -1,4 +1,4 @@
/* $OpenBSD: ufs_vnops.c,v 1.159 2024/01/09 03:15:59 guenther Exp $ */
/* $OpenBSD: ufs_vnops.c,v 1.160 2024/02/03 18:51:59 beck Exp $ */
/* $NetBSD: ufs_vnops.c,v 1.18 1996/05/11 18:28:04 mycroft Exp $ */
/*
@@ -109,7 +109,7 @@ ufs_itimes(struct vnode *vp)
}
#endif
if ((vp->v_type == VBLK || vp->v_type == VCHR) && !DOINGSOFTDEP(vp))
if ((vp->v_type == VBLK || vp->v_type == VCHR))
ip->i_flag |= IN_LAZYMOD;
else
ip->i_flag |= IN_MODIFIED;
@@ -649,9 +649,7 @@ ufs_link(void *v)
ip->i_effnlink++;
DIP_ADD(ip, nlink, 1);
ip->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(vp))
softdep_change_linkcnt(ip, 0);
if ((error = UFS_UPDATE(ip, !DOINGSOFTDEP(vp))) == 0) {
if ((error = UFS_UPDATE(ip, 1)) == 0) {
ufs_makedirentry(ip, cnp, &newdir);
error = ufs_direnter(dvp, vp, &newdir, cnp, NULL);
}
@@ -659,8 +657,6 @@
ip->i_effnlink--;
DIP_ADD(ip, nlink, -1);
ip->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(vp))
softdep_change_linkcnt(ip, 0);
}
pool_put(&namei_pool, cnp->cn_pnbuf);
VN_KNOTE(vp, NOTE_LINK);
@@ -846,9 +842,7 @@ abortit:
ip->i_effnlink++;
DIP_ADD(ip, nlink, 1);
ip->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(fvp))
softdep_change_linkcnt(ip, 0);
if ((error = UFS_UPDATE(ip, !DOINGSOFTDEP(fvp))) != 0) {
if ((error = UFS_UPDATE(ip, 1)) != 0) {
VOP_UNLOCK(fvp);
goto bad;
}
@@ -916,15 +910,11 @@ abortit:
dp->i_effnlink++;
DIP_ADD(dp, nlink, 1);
dp->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(tdvp))
softdep_change_linkcnt(dp, 0);
if ((error = UFS_UPDATE(dp, !DOINGSOFTDEP(tdvp)))
if ((error = UFS_UPDATE(dp, 1))
!= 0) {
dp->i_effnlink--;
DIP_ADD(dp, nlink, -1);
dp->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(tdvp))
softdep_change_linkcnt(dp, 0);
goto bad;
}
}
@@ -934,8 +924,6 @@ abortit:
dp->i_effnlink--;
DIP_ADD(dp, nlink, -1);
dp->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(tdvp))
softdep_change_linkcnt(dp, 0);
(void)UFS_UPDATE(dp, 1);
}
goto bad;
@@ -991,14 +979,10 @@ abortit:
if (doingdirectory) {
if (!newparent) {
dp->i_effnlink--;
if (DOINGSOFTDEP(tdvp))
softdep_change_linkcnt(dp, 0);
}
xp->i_effnlink--;
if (DOINGSOFTDEP(tvp))
softdep_change_linkcnt(xp, 0);
}
if (doingdirectory && !DOINGSOFTDEP(tvp)) {
if (doingdirectory) {
/*
* Truncate inode. The only stuff left in the directory
* is "." and "..". The "." reference is inconsequential
@@ -1101,8 +1085,6 @@ out:
DIP_ADD(ip, nlink, -1);
ip->i_flag |= IN_CHANGE;
ip->i_flag &= ~IN_RENAME;
if (DOINGSOFTDEP(fvp))
softdep_change_linkcnt(ip, 0);
vput(fvp);
} else
vrele(fvp);
@@ -1124,7 +1106,7 @@ ufs_mkdir(void *v)
struct buf *bp;
struct direct newdir;
struct dirtemplate dirtemplate;
int error, dmode, blkoff;
int error, dmode;
#ifdef DIAGNOSTIC
if ((cnp->cn_flags & HASBUF) == 0)
@@ -1164,8 +1146,6 @@ ufs_mkdir(void *v)
tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */
ip->i_effnlink = 2;
DIP_ASSIGN(ip, nlink, 2);
if (DOINGSOFTDEP(tvp))
softdep_change_linkcnt(ip, 0);
/*
* Bump link count in parent directory to reflect work done below.
@@ -1175,9 +1155,7 @@
dp->i_effnlink++;
DIP_ADD(dp, nlink, 1);
dp->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(dvp))
softdep_change_linkcnt(dp, 0);
if ((error = UFS_UPDATE(dp, !DOINGSOFTDEP(dvp))) != 0)
if ((error = UFS_UPDATE(dp, 1)) != 0)
goto bad;
/*
@@ -1194,21 +1172,7 @@
ip->i_flag |= IN_CHANGE | IN_UPDATE;
uvm_vnp_setsize(tvp, DIP(ip, size));
memcpy(bp->b_data, &dirtemplate, sizeof(dirtemplate));
if (DOINGSOFTDEP(tvp)) {
/*
* Ensure that the entire newly allocated block is a
* valid directory so that future growth within the
* block does not have to ensure that the block is
* written before the inode
*/
blkoff = DIRBLKSIZ;
while (blkoff < bp->b_bcount) {
((struct direct *)
(bp->b_data + blkoff))->d_reclen = DIRBLKSIZ;
blkoff += DIRBLKSIZ;
}
}
if ((error = UFS_UPDATE(ip, !DOINGSOFTDEP(tvp))) != 0) {
if ((error = UFS_UPDATE(ip, 1)) != 0) {
(void)VOP_BWRITE(bp);
goto bad;
}
@@ -1224,7 +1188,7 @@
* an appropriate ordering dependency to the buffer which ensures that
* the buffer is written before the new name is written in the parent.
*/
if (!DOINGSOFTDEP(dvp) && ((error = VOP_BWRITE(bp)) != 0))
if ((error = VOP_BWRITE(bp)) != 0)
goto bad;
ufs_makedirentry(ip, cnp, &newdir);
error = ufs_direnter(dvp, tvp, &newdir, cnp, bp);
@@ -1237,8 +1201,6 @@ bad:
dp->i_effnlink--;
DIP_ADD(dp, nlink, -1);
dp->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(dvp))
softdep_change_linkcnt(dp, 0);
/*
* No need to do an explicit VOP_TRUNCATE here, vrele will
* do this for us because we set the link count to 0.
@@ -1246,8 +1208,6 @@ bad:
ip->i_effnlink = 0;
DIP_ASSIGN(ip, nlink, 0);
ip->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(tvp))
softdep_change_linkcnt(ip, 0);
vput(tvp);
}
out:
@@ -1300,17 +1260,9 @@ ufs_rmdir(void *v)
*/
dp->i_effnlink--;
ip->i_effnlink--;
if (DOINGSOFTDEP(vp)) {
softdep_change_linkcnt(dp, 0);
softdep_change_linkcnt(ip, 0);
}
if ((error = ufs_dirremove(dvp, ip, cnp->cn_flags, 1)) != 0) {
dp->i_effnlink++;
ip->i_effnlink++;
if (DOINGSOFTDEP(vp)) {
softdep_change_linkcnt(dp, 0);
softdep_change_linkcnt(ip, 0);
}
goto out;
}
@@ -1319,20 +1271,15 @@
/*
* Truncate inode. The only stuff left in the directory is "." and
* "..". The "." reference is inconsequential since we are quashing
* it. The soft dependency code will arrange to do these operations
* after the parent directory entry has been deleted on disk, so
* when running with that code we avoid doing them now.
* it.
*/
if (!DOINGSOFTDEP(vp)) {
int ioflag;
DIP_ADD(dp, nlink, -1);
dp->i_flag |= IN_CHANGE;
DIP_ADD(ip, nlink, -1);
ip->i_flag |= IN_CHANGE;
error = UFS_TRUNCATE(ip, (off_t)0, DOINGASYNC(vp) ? 0 : IO_SYNC,
cnp->cn_cred);
DIP_ADD(dp, nlink, -1);
dp->i_flag |= IN_CHANGE;
DIP_ADD(ip, nlink, -1);
ip->i_flag |= IN_CHANGE;
ioflag = DOINGASYNC(vp) ? 0 : IO_SYNC;
error = UFS_TRUNCATE(ip, (off_t)0, ioflag, cnp->cn_cred);
}
cache_purge(vp);
#ifdef UFS_DIRHASH
/* Kill any active hash; i_effnlink == 0, so it will not come back. */
@@ -1816,8 +1763,6 @@ ufs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
tvp->v_type = IFTOVT(mode); /* Rest init'd in getnewvnode(). */
ip->i_effnlink = 1;
DIP_ASSIGN(ip, nlink, 1);
if (DOINGSOFTDEP(tvp))
softdep_change_linkcnt(ip, 0);
if ((DIP(ip, mode) & ISGID) &&
!groupmember(DIP(ip, gid), cnp->cn_cred) &&
!vnoperm(dvp) &&
@@ -1827,7 +1772,7 @@ ufs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
/*
* Make sure inode goes to disk before directory entry.
*/
if ((error = UFS_UPDATE(ip, !DOINGSOFTDEP(tvp))) != 0)
if ((error = UFS_UPDATE(ip, 1)) != 0)
goto bad;
ufs_makedirentry(ip, cnp, &newdir);
@@ -1848,8 +1793,6 @@ bad:
ip->i_effnlink = 0;
DIP_ASSIGN(ip, nlink, 0);
ip->i_flag |= IN_CHANGE;
if (DOINGSOFTDEP(tvp))
softdep_change_linkcnt(ip, 0);
tvp->v_type = VNON;
vput(tvp);


@@ -1,4 +1,4 @@
/* $OpenBSD: uvm_swap.c,v 1.168 2023/12/05 15:50:45 claudio Exp $ */
/* $OpenBSD: uvm_swap.c,v 1.169 2024/02/03 18:51:59 beck Exp $ */
/* $NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $ */
/*
@@ -1277,7 +1277,6 @@ sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
nbp->vb_buf.b_iodone = sw_reg_iodone;
nbp->vb_buf.b_vp = NULLVP;
nbp->vb_buf.b_vnbufs.le_next = NOLIST;
LIST_INIT(&nbp->vb_buf.b_dep);
/*
* set b_dirtyoff/end and b_validoff/end. this is
@@ -1863,7 +1862,6 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
bp->b_data = (caddr_t)kva;
bp->b_bq = NULL;
bp->b_blkno = startblk;
LIST_INIT(&bp->b_dep);
s = splbio();
bp->b_vp = NULL;
buf_replacevnode(bp, swapdev_vp);