sync with OpenBSD -current

parent d93a7459f8
commit a8049e67d3

96 changed files with 5193 additions and 559 deletions

@@ -1,4 +1,4 @@
-/* $OpenBSD: db_disasm.c,v 1.25 2024/06/06 12:36:41 bluhm Exp $ */
+/* $OpenBSD: db_disasm.c,v 1.26 2024/07/09 01:21:19 jsg Exp $ */
 /* $NetBSD: db_disasm.c,v 1.11 1996/05/03 19:41:58 christos Exp $ */
 
 /*
@@ -409,8 +409,8 @@ struct finst db_Esca[] = {
 /*1*/	{ "fimul",	LONG,	0,	0 },
 /*2*/	{ "ficom",	LONG,	0,	0 },
 /*3*/	{ "ficomp",	LONG,	0,	0 },
-/*4*/	{ "fisub",	LONG,	op1(X),	0 },
-/*5*/	{ "fisubr",	LONG,	0,	0 },
+/*4*/	{ "fisub",	LONG,	0,	0 },
+/*5*/	{ "fisubr",	LONG,	op1(X),	db_Esca5 },
 /*6*/	{ "fidiv",	LONG,	0,	0 },
 /*7*/	{ "fidivr",	LONG,	0,	0 }
 };

@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.168 2024/06/03 20:53:00 dv Exp $ */
+/* $OpenBSD: pmap.c,v 1.169 2024/07/09 19:11:06 bluhm Exp $ */
 /* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
 
 /*
@@ -235,6 +235,11 @@ pt_entry_t pg_g_kern = 0;
 /* pg_xo: XO PTE bits, set to PKU key1 (if cpu supports PKU) */
 pt_entry_t pg_xo;
 
+/* pg_crypt, pg_frame, pg_lgframe: will be derived from CPUID */
+pt_entry_t pg_crypt = 0;
+pt_entry_t pg_frame = PG_FRAME;
+pt_entry_t pg_lgframe = PG_LGFRAME;
+
 /*
  * pmap_pg_wc: if our processor supports PAT then we set this
  * to be the pte bits for Write Combining. Else we fall back to
@@ -465,7 +470,7 @@ pmap_find_pte_direct(struct pmap *pm, vaddr_t va, pt_entry_t **pd, int *offs)
	if ((pde & (PG_PS|PG_V)) != PG_V)
		return (lev - 1);
 
-	pdpa = ((*pd)[*offs] & PG_FRAME);
+	pdpa = ((*pd)[*offs] & pg_frame);
	/* 4096/8 == 512 == 2^9 entries per level */
	shift -= 9;
	mask >>= 9;
@@ -498,7 +503,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 
	npte = (pa & PMAP_PA_MASK) | ((prot & PROT_WRITE) ? PG_RW : PG_RO) |
	    ((pa & PMAP_NOCACHE) ? PG_N : 0) |
-	    ((pa & PMAP_WC) ? pmap_pg_wc : 0) | PG_V;
+	    ((pa & PMAP_WC) ? pmap_pg_wc : 0) | PG_V |
+	    ((pa & PMAP_NOCRYPT) ? 0 : pg_crypt);
 
	/* special 1:1 mappings in the first 2MB must not be global */
	if (va >= (vaddr_t)NBPD_L2)
@@ -513,7 +519,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
		panic("%s: PG_PS", __func__);
 #endif
	if (pmap_valid_entry(opte)) {
-		if (pa & PMAP_NOCACHE && (opte & PG_N) == 0)
+		if ((pa & PMAP_NOCACHE && (opte & PG_N) == 0) ||
+		    (pa & PMAP_NOCRYPT))
			wbinvd_on_all_cpus();
		/* This shouldn't happen */
		pmap_tlb_shootpage(pmap_kernel(), va, 1);
@@ -582,7 +589,8 @@ pmap_set_pml4_early(paddr_t pa)
	vaddr_t va;
 
	pml4e = (pt_entry_t *)(proc0.p_addr->u_pcb.pcb_cr3 + KERNBASE);
-	pml4e[PDIR_SLOT_EARLY] = (pd_entry_t)early_pte_pages | PG_V | PG_RW;
+	pml4e[PDIR_SLOT_EARLY] = (pd_entry_t)early_pte_pages | PG_V | PG_RW |
+	    pg_crypt;
 
	off = pa & PAGE_MASK_L2;
	curpa = pa & L2_FRAME;
@@ -590,15 +598,16 @@ pmap_set_pml4_early(paddr_t pa)
	pte = (pt_entry_t *)PMAP_DIRECT_MAP(early_pte_pages);
	memset(pte, 0, 3 * NBPG);
 
-	pte[0] = (early_pte_pages + NBPG) | PG_V | PG_RW;
-	pte[1] = (early_pte_pages + 2 * NBPG) | PG_V | PG_RW;
+	pte[0] = (early_pte_pages + NBPG) | PG_V | PG_RW | pg_crypt;
+	pte[1] = (early_pte_pages + 2 * NBPG) | PG_V | PG_RW | pg_crypt;
 
	pte = (pt_entry_t *)PMAP_DIRECT_MAP(early_pte_pages + NBPG);
	for (i = 0; i < 2; i++) {
		/* 2 early pages of mappings */
		for (j = 0; j < 512; j++) {
			/* j[0..511] : 2MB mappings per page */
-			pte[(i * 512) + j] = curpa | PG_V | PG_RW | PG_PS;
+			pte[(i * 512) + j] = curpa | PG_V | PG_RW | PG_PS |
+			    pg_crypt;
			curpa += (2 * 1024 * 1024);
		}
	}
@@ -777,7 +786,7 @@ pmap_bootstrap(paddr_t first_avail, paddr_t max_pa)
	if (ndmpdp > 512)
		ndmpdp = 512;	/* At most 512GB */
 
-	dmpdp = kpm->pm_pdir[PDIR_SLOT_DIRECT] & PG_FRAME;
+	dmpdp = kpm->pm_pdir[PDIR_SLOT_DIRECT] & pg_frame;
 
	dmpd = first_avail; first_avail += ndmpdp * PAGE_SIZE;
 
@@ -790,7 +799,7 @@ pmap_bootstrap(paddr_t first_avail, paddr_t max_pa)
 
		*((pd_entry_t *)va) = ((paddr_t)i << L2_SHIFT);
		*((pd_entry_t *)va) |= PG_RW | PG_V | PG_PS | pg_g_kern | PG_U |
-		    PG_M | pg_nx;
+		    PG_M | pg_nx | pg_crypt;
	}
 
	for (i = NDML2_ENTRIES; i < ndmpdp; i++) {
@@ -801,11 +810,12 @@ pmap_bootstrap(paddr_t first_avail, paddr_t max_pa)
		va = PMAP_DIRECT_MAP(pdp);
 
		*((pd_entry_t *)va) = dmpd + (i << PAGE_SHIFT);
-		*((pd_entry_t *)va) |= PG_RW | PG_V | PG_U | PG_M | pg_nx;
+		*((pd_entry_t *)va) |= PG_RW | PG_V | PG_U | PG_M | pg_nx |
+		    pg_crypt;
	}
 
	kpm->pm_pdir[PDIR_SLOT_DIRECT] = dmpdp | PG_V | PG_KW | PG_U |
-	    PG_M | pg_nx;
+	    PG_M | pg_nx | pg_crypt;
 
	/* Map any remaining physical memory > 512GB */
	for (curslot = 1 ; curslot < NUM_L4_SLOT_DIRECT ; curslot++) {
@@ -818,7 +828,7 @@ pmap_bootstrap(paddr_t first_avail, paddr_t max_pa)
		dmpd = first_avail; first_avail += PAGE_SIZE;
		pml3 = (pt_entry_t *)PMAP_DIRECT_MAP(dmpd);
		kpm->pm_pdir[PDIR_SLOT_DIRECT + curslot] = dmpd |
-		    PG_KW | PG_V | PG_U | PG_M | pg_nx;
+		    PG_KW | PG_V | PG_U | PG_M | pg_nx | pg_crypt;
 
		/* Calculate full 1GB pages in this 512GB region */
		p = ((max_pa - start_cur) >> L3_SHIFT);
@@ -839,7 +849,8 @@ pmap_bootstrap(paddr_t first_avail, paddr_t max_pa)
			dmpd = first_avail; first_avail += PAGE_SIZE;
			pml2 = (pt_entry_t *)PMAP_DIRECT_MAP(dmpd);
			pml3[i] = dmpd |
-			    PG_RW | PG_V | PG_U | PG_M | pg_nx;
+			    PG_RW | PG_V | PG_U | PG_M | pg_nx |
+			    pg_crypt;
 
			cur_pa = start_cur + (i << L3_SHIFT);
			j = 0;
@@ -849,7 +860,8 @@ pmap_bootstrap(paddr_t first_avail, paddr_t max_pa)
				    (uint64_t)i * NBPD_L3 +
				    (uint64_t)j * NBPD_L2;
				pml2[j] |= PG_RW | PG_V | pg_g_kern |
-				    PG_U | PG_M | pg_nx | PG_PS;
+				    PG_U | PG_M | pg_nx | PG_PS |
+				    pg_crypt;
				cur_pa += NBPD_L2;
				j++;
			}
@@ -949,14 +961,14 @@ pmap_randomize(void)
	proc0.p_addr->u_pcb.pcb_cr3 = pml4pa;
 
	/* Fixup recursive PTE PML4E slot. We are only changing the PA */
-	pml4va[PDIR_SLOT_PTE] = pml4pa | (pml4va[PDIR_SLOT_PTE] & ~PG_FRAME);
+	pml4va[PDIR_SLOT_PTE] = pml4pa | (pml4va[PDIR_SLOT_PTE] & ~pg_frame);
 
	for (i = 0; i < NPDPG; i++) {
		/* PTE slot already handled earlier */
		if (i == PDIR_SLOT_PTE)
			continue;
 
-		if (pml4va[i] & PG_FRAME)
+		if (pml4va[i] & pg_frame)
			pmap_randomize_level(&pml4va[i], 3);
	}
 
@@ -985,11 +997,11 @@ pmap_randomize_level(pd_entry_t *pde, int level)
		panic("%s: cannot allocate page for L%d page directory",
		    __func__, level);
 
-	old_pd_pa = *pde & PG_FRAME;
+	old_pd_pa = *pde & pg_frame;
	old_pd_va = PMAP_DIRECT_MAP(old_pd_pa);
	pmap_extract(pmap_kernel(), (vaddr_t)new_pd_va, &new_pd_pa);
	memcpy(new_pd_va, (void *)old_pd_va, PAGE_SIZE);
-	*pde = new_pd_pa | (*pde & ~PG_FRAME);
+	*pde = new_pd_pa | (*pde & ~pg_frame);
 
	tlbflush();
	memset((void *)old_pd_va, 0, PAGE_SIZE);
@@ -1003,7 +1015,7 @@ pmap_randomize_level(pd_entry_t *pde, int level)
	}
 
	for (i = 0; i < NPDPG; i++)
-		if (new_pd_va[i] & PG_FRAME)
+		if (new_pd_va[i] & pg_frame)
			pmap_randomize_level(&new_pd_va[i], level - 1);
 }
 
@@ -1023,7 +1035,8 @@ pmap_prealloc_lowmem_ptps(paddr_t first_avail)
	for (;;) {
		newp = first_avail; first_avail += PAGE_SIZE;
		memset((void *)PMAP_DIRECT_MAP(newp), 0, PAGE_SIZE);
-		pdes[pl_i(0, level)] = (newp & PG_FRAME) | PG_V | PG_RW;
+		pdes[pl_i(0, level)] =
+		    (newp & pg_frame) | PG_V | PG_RW | pg_crypt;
		level--;
		if (level <= 1)
			break;
@@ -1203,7 +1216,7 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t va)
		pva = normal_pdes[i - 2];
 
		if (pmap_valid_entry(pva[index])) {
-			ppa = pva[index] & PG_FRAME;
+			ppa = pva[index] & pg_frame;
			ptp = NULL;
			continue;
		}
@@ -1219,7 +1232,7 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t va)
		ptp->wire_count = 1;
		pmap->pm_ptphint[i - 2] = ptp;
		pa = VM_PAGE_TO_PHYS(ptp);
-		pva[index] = (pd_entry_t) (pa | PG_u | PG_RW | PG_V);
+		pva[index] = (pd_entry_t) (pa | PG_u | PG_RW | PG_V | pg_crypt);
 
		/*
		 * Meltdown Special case - if we are adding a new PML4e for
@@ -1292,7 +1305,7 @@ pmap_pdp_ctor(pd_entry_t *pdir)
	memset(pdir, 0, PDIR_SLOT_PTE * sizeof(pd_entry_t));
 
	/* put in recursive PDE to map the PTEs */
-	pdir[PDIR_SLOT_PTE] = pdirpa | PG_V | PG_KW | pg_nx;
+	pdir[PDIR_SLOT_PTE] = pdirpa | PG_V | PG_KW | pg_nx | pg_crypt;
 
	npde = nkptp[PTP_LEVELS - 1];
 
@@ -1359,7 +1372,7 @@ pmap_create(void)
	pmap->pm_pdir = pool_get(&pmap_pdp_pool, PR_WAITOK);
	pmap_pdp_ctor(pmap->pm_pdir);
 
-	pmap->pm_pdirpa = pmap->pm_pdir[PDIR_SLOT_PTE] & PG_FRAME;
+	pmap->pm_pdirpa = pmap->pm_pdir[PDIR_SLOT_PTE] & pg_frame;
 
	/*
	 * Intel CPUs need a special page table to be used during usermode
@@ -1557,12 +1570,12 @@ pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
 
	if (__predict_true(level == 0 && pmap_valid_entry(pte))) {
		if (pap != NULL)
-			*pap = (pte & PG_FRAME) | (va & PAGE_MASK);
+			*pap = (pte & pg_frame) | (va & PAGE_MASK);
		return 1;
	}
	if (level == 1 && (pte & (PG_PS|PG_V)) == (PG_PS|PG_V)) {
		if (pap != NULL)
-			*pap = (pte & PG_LGFRAME) | (va & PAGE_MASK_L2);
+			*pap = (pte & pg_lgframe) | (va & PAGE_MASK_L2);
		return 1;
	}
 
@@ -1661,7 +1674,7 @@ pmap_remove_ptes(struct pmap *pmap, struct vm_page *ptp, vaddr_t ptpva,
		if (ptp != NULL)
			ptp->wire_count--;	/* dropping a PTE */
 
-		pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
+		pg = PHYS_TO_VM_PAGE(opte & pg_frame);
 
		/*
		 * if we are not on a pv list we are done.
@@ -1728,7 +1741,7 @@ pmap_remove_pte(struct pmap *pmap, struct vm_page *ptp, pt_entry_t *pte,
	if (ptp != NULL)
		ptp->wire_count--;	/* dropping a PTE */
 
-	pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
+	pg = PHYS_TO_VM_PAGE(opte & pg_frame);
 
	/*
	 * if we are not on a pv list we are done.
@@ -1808,7 +1821,7 @@ pmap_do_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
		if (pmap_pdes_valid(sva, &pde)) {
 
			/* PA of the PTP */
-			ptppa = pde & PG_FRAME;
+			ptppa = pde & pg_frame;
 
			/* get PTP if non-kernel mapping */
 
@@ -1876,7 +1889,7 @@ pmap_do_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
			continue;
 
		/* PA of the PTP */
-		ptppa = pde & PG_FRAME;
+		ptppa = pde & pg_frame;
 
		/* get PTP if non-kernel mapping */
		if (pmap == pmap_kernel()) {
@@ -1974,12 +1987,12 @@ pmap_page_remove(struct vm_page *pg)
 
 #ifdef DIAGNOSTIC
		if (pve->pv_ptp != NULL && pmap_pdes_valid(pve->pv_va, &pde) &&
-		    (pde & PG_FRAME) != VM_PAGE_TO_PHYS(pve->pv_ptp)) {
+		    (pde & pg_frame) != VM_PAGE_TO_PHYS(pve->pv_ptp)) {
			printf("%s: pg=%p: va=%lx, pv_ptp=%p\n", __func__,
			    pg, pve->pv_va, pve->pv_ptp);
			printf("%s: PTP's phys addr: "
			    "actual=%lx, recorded=%lx\n", __func__,
-			    (unsigned long)(pde & PG_FRAME),
+			    (unsigned long)(pde & pg_frame),
			    VM_PAGE_TO_PHYS(pve->pv_ptp));
			panic("%s: mapped managed page has "
			    "invalid pv_ptp field", __func__);
@@ -2140,8 +2153,8 @@ pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
	shootself = (scr3 == 0);
 
	/* should be ok, but just in case ... */
-	sva &= PG_FRAME;
-	eva &= PG_FRAME;
+	sva &= pg_frame;
+	eva &= pg_frame;
 
	if (!(prot & PROT_READ))
		set |= pg_xo;
@@ -2314,7 +2327,7 @@ pmap_enter_special(vaddr_t va, paddr_t pa, vm_prot_t prot)
		if (!pmap_extract(pmap, (vaddr_t)ptp, &npa))
			panic("%s: can't locate PDPT page", __func__);
 
-		pd[l4idx] = (npa | PG_RW | PG_V);
+		pd[l4idx] = (npa | PG_RW | PG_V | pg_crypt);
 
		DPRINTF("%s: allocated new PDPT page at phys 0x%llx, "
		    "setting PML4e[%lld] = 0x%llx\n", __func__,
@@ -2338,7 +2351,7 @@ pmap_enter_special(vaddr_t va, paddr_t pa, vm_prot_t prot)
		if (!pmap_extract(pmap, (vaddr_t)ptp, &npa))
			panic("%s: can't locate PD page", __func__);
 
-		pd[l3idx] = (npa | PG_RW | PG_V);
+		pd[l3idx] = (npa | PG_RW | PG_V | pg_crypt);
 
		DPRINTF("%s: allocated new PD page at phys 0x%llx, "
		    "setting PDPTe[%lld] = 0x%llx\n", __func__,
@@ -2362,7 +2375,7 @@ pmap_enter_special(vaddr_t va, paddr_t pa, vm_prot_t prot)
		if (!pmap_extract(pmap, (vaddr_t)ptp, &npa))
			panic("%s: can't locate PT page", __func__);
 
-		pd[l2idx] = (npa | PG_RW | PG_V);
+		pd[l2idx] = (npa | PG_RW | PG_V | pg_crypt);
 
		DPRINTF("%s: allocated new PT page at phys 0x%llx, "
		    "setting PDE[%lld] = 0x%llx\n", __func__,
@@ -2378,7 +2391,7 @@ pmap_enter_special(vaddr_t va, paddr_t pa, vm_prot_t prot)
	    "0x%llx was 0x%llx\n", __func__, (uint64_t)npa, (uint64_t)pd,
	    (uint64_t)prot, (uint64_t)pd[l1idx]);
 
-	pd[l1idx] = pa | protection_codes[prot] | PG_V | PG_W;
+	pd[l1idx] = pa | protection_codes[prot] | PG_V | PG_W | pg_crypt;
 
	/*
	 * Look up the corresponding U+K entry. If we're installing the
@@ -2387,7 +2400,7 @@ pmap_enter_special(vaddr_t va, paddr_t pa, vm_prot_t prot)
	 */
	level = pmap_find_pte_direct(pmap, va, &ptes, &offs);
	if (__predict_true(level == 0 && pmap_valid_entry(ptes[offs]))) {
-		if (((pd[l1idx] ^ ptes[offs]) & PG_FRAME) == 0) {
+		if (((pd[l1idx] ^ ptes[offs]) & pg_frame) == 0) {
			pd[l1idx] |= PG_G | (ptes[offs] & (PG_N | PG_WT));
			ptes[offs] |= PG_G;
		} else {
@@ -2701,6 +2714,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
	struct pv_entry *pve, *opve = NULL;
	int ptpdelta, wireddelta, resdelta;
	int wired = (flags & PMAP_WIRED) != 0;
+	int crypt = (flags & PMAP_NOCRYPT) == 0;
	int nocache = (pa & PMAP_NOCACHE) != 0;
	int wc = (pa & PMAP_WC) != 0;
	int error, shootself;
@@ -2778,7 +2792,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
	 * want to map?
	 */
 
-	if ((opte & PG_FRAME) == pa) {
+	if ((opte & pg_frame) == pa) {
 
		/* if this is on the PVLIST, sync R/M bit */
		if (opte & PG_PVLIST) {
@@ -2813,7 +2827,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
	 */
 
	if (opte & PG_PVLIST) {
-		pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
+		pg = PHYS_TO_VM_PAGE(opte & pg_frame);
 #ifdef DIAGNOSTIC
		if (pg == NULL)
			panic("%s: PG_PVLIST mapping with unmanaged "
@@ -2887,6 +2901,8 @@ enter_now:
		npte |= (PG_u | PG_RW);	/* XXXCDC: no longer needed? */
	if (pmap == pmap_kernel())
		npte |= pg_g_kern;
+	if (crypt)
+		npte |= pg_crypt;
 
	/*
	 * If the old entry wasn't valid, we can just update it and
@@ -2998,7 +3014,7 @@ pmap_alloc_level(vaddr_t kva, int lvl, long *needed_ptps)
 
	for (i = index; i <= endindex; i++) {
		pmap_get_physpage(va, level - 1, &pa);
-		pdep[i] = pa | PG_RW | PG_V | pg_nx;
+		pdep[i] = pa | PG_RW | PG_V | pg_nx | pg_crypt;
		nkptp[level - 1]++;
		va += nbpd[level - 1];
	}

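Note on the pmap.c hunks above: the fixed PG_FRAME/PG_LGFRAME masks become the variables pg_frame/pg_lgframe, and pg_crypt is OR'd into kernel mappings, because AMD SEV repurposes one physical-address bit of each PTE as the memory-encryption "C" bit. A minimal standalone sketch of how such masks can be derived, assuming the documented SEV CPUID leaf — the function and the direct use of raw CPUID output here are illustrative, not the committed OpenBSD code:

#include <stdint.h>

uint64_t pg_crypt;				/* C bit; stays 0 without SEV */
uint64_t pg_frame = 0x000ffffffffff000ULL;	/* default PG_FRAME */

/*
 * CPUID 0x8000001f: EAX bit 1 = SEV supported, EBX[5:0] = position of
 * the C bit in page table entries (per AMD's SEV documentation).
 */
void
derive_pte_masks(uint32_t eax, uint32_t ebx)
{
	if (eax & (1U << 1)) {
		pg_crypt = 1ULL << (ebx & 0x3f);
		/* the C bit must no longer count as part of the PA */
		pg_frame &= ~pg_crypt;
	}
}

With masks set up this way, every `pte & pg_frame` in the diff extracts the physical address without the C bit, and PMAP_NOCRYPT mappings simply skip the pg_crypt OR.
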
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm_support.S,v 1.27 2024/04/01 05:11:49 guenther Exp $ */
+/* $OpenBSD: vmm_support.S,v 1.28 2024/07/09 11:15:58 deraadt Exp $ */
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -57,6 +57,7 @@ vmm_dispatch_intr:
	pushq	%rax
	cli
	callq	*%rdi
+	movq	$0,-8(%rsp)
	ret
	lfence
 
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.88 2023/12/29 13:23:28 jca Exp $ */
+/* $OpenBSD: pmap.h,v 1.89 2024/07/09 19:11:06 bluhm Exp $ */
 /* $NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $ */
 
 /*
@@ -320,6 +320,7 @@ struct pmap {
 };
 
 #define PMAP_EFI	PMAP_MD0
+#define PMAP_NOCRYPT	PMAP_MD1
 
 /*
  * MD flags that we use for pmap_enter (in the pa):

@@ -1,4 +1,4 @@
-/* $OpenBSD: pte.h,v 1.17 2023/01/20 16:01:04 deraadt Exp $ */
+/* $OpenBSD: pte.h,v 1.18 2024/07/09 19:11:06 bluhm Exp $ */
 /* $NetBSD: pte.h,v 1.1 2003/04/26 18:39:47 fvdl Exp $ */
 
 /*
@@ -164,6 +164,7 @@ typedef u_int64_t pt_entry_t;	/* PTE */
 #ifdef _KERNEL
 extern pt_entry_t pg_xo;	/* XO pte bits using PKU key1 */
 extern pt_entry_t pg_nx;	/* NX pte bit */
+extern pt_entry_t pg_crypt;	/* C pte bit */
 extern pt_entry_t pg_g_kern;	/* PG_G if glbl mappings can be used in kern */
 #endif /* _KERNEL */
 
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmmvar.h,v 1.101 2024/04/29 14:47:05 dv Exp $ */
+/* $OpenBSD: vmmvar.h,v 1.102 2024/07/09 09:31:37 dv Exp $ */
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -23,14 +23,6 @@
 
 #define VMM_HV_SIGNATURE	"OpenBSDVMM58"
 
-#define VMM_MAX_MEM_RANGES	16
-#define VMM_MAX_DISKS_PER_VM	4
-#define VMM_MAX_NAME_LEN	64
-#define VMM_MAX_VCPUS		512
-#define VMM_MAX_VCPUS_PER_VM	64
-#define VMM_MAX_VM_MEM_SIZE	128L * 1024 * 1024 * 1024
-#define VMM_MAX_NICS_PER_VM	4
-
 #define VMM_PCI_MMIO_BAR_BASE	0xF0000000ULL
 #define VMM_PCI_MMIO_BAR_END	0xFFDFFFFFULL	/* 2 MiB below 4 GiB */
 
@@ -474,21 +466,6 @@ struct vm_exit {
	int			cpl;
 };
 
-struct vm_run_params {
-	/* Input parameters to VMM_IOC_RUN */
-	uint32_t	vrp_vm_id;
-	uint32_t	vrp_vcpu_id;
-	struct vcpu_inject_event	vrp_inject;
-	uint8_t		vrp_intr_pending;	/* Additional intrs pending? */
-
-	/* Input/output parameter to VMM_IOC_RUN */
-	struct vm_exit	*vrp_exit;	/* updated exit data */
-
-	/* Output parameter from VMM_IOC_RUN */
-	uint16_t	vrp_exit_reason;	/* exit reason */
-	uint8_t		vrp_irqready;	/* ready for IRQ on entry */
-};
-
 struct vm_intr_params {
	/* Input parameters to VMM_IOC_INTR */
	uint32_t		vip_vm_id;
@@ -961,7 +938,6 @@ int vcpu_init(struct vcpu *);
 void vcpu_deinit(struct vcpu *);
 int vm_rwvmparams(struct vm_rwvmparams_params *, int);
 int vm_rwregs(struct vm_rwregs_params *, int);
-int vm_run(struct vm_run_params *);
 int vcpu_reset_regs(struct vcpu *, struct vcpu_reg_state *);
 
 #endif /* _KERNEL */

@@ -1,4 +1,4 @@
-/* $OpenBSD: db_disasm.c,v 1.24 2020/09/11 09:27:10 mpi Exp $ */
+/* $OpenBSD: db_disasm.c,v 1.25 2024/07/09 01:21:19 jsg Exp $ */
 /* $NetBSD: db_disasm.c,v 1.11 1996/05/03 19:41:58 christos Exp $ */
 
 /*
@@ -410,8 +410,8 @@ struct finst db_Esca[] = {
 /*1*/	{ "fimul",	LONG,	0,	0 },
 /*2*/	{ "ficom",	LONG,	0,	0 },
 /*3*/	{ "ficomp",	LONG,	0,	0 },
-/*4*/	{ "fisub",	LONG,	op1(X),	0 },
-/*5*/	{ "fisubr",	LONG,	0,	0 },
+/*4*/	{ "fisub",	LONG,	0,	0 },
+/*5*/	{ "fisubr",	LONG,	op1(X),	db_Esca5 },
 /*6*/	{ "fidiv",	LONG,	0,	0 },
 /*7*/	{ "fidivr",	LONG,	0,	0 }
 };

@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.672 2024/06/07 16:53:35 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.673 2024/07/09 07:28:12 mlarkin Exp $ */
 /* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
 
 /*-

@@ -1,4 +1,4 @@
-/* $OpenBSD: dwpcie.c,v 1.55 2024/07/05 22:52:25 patrick Exp $ */
+/* $OpenBSD: dwpcie.c,v 1.56 2024/07/09 08:47:10 kettenis Exp $ */
 /*
  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
  *
@@ -60,9 +60,9 @@
 
 #define PCIE_MSI_ADDR_LO	0x820
 #define PCIE_MSI_ADDR_HI	0x824
-#define PCIE_MSI_INTR0_ENABLE	0x828
-#define PCIE_MSI_INTR0_MASK	0x82c
-#define PCIE_MSI_INTR0_STATUS	0x830
+#define PCIE_MSI_INTR_ENABLE(x)	(0x828 + (x) * 12)
+#define PCIE_MSI_INTR_MASK(x)	(0x82c + (x) * 12)
+#define PCIE_MSI_INTR_STATUS(x)	(0x830 + (x) * 12)
 
 #define MISC_CONTROL_1		0x8bc
 #define MISC_CONTROL_1_DBI_RO_WR_EN	(1 << 0)
@@ -215,7 +215,7 @@ struct dwpcie_intx {
	TAILQ_ENTRY(dwpcie_intx) di_next;
 };
 
-#define DWPCIE_NUM_MSI		32
+#define DWPCIE_MAX_MSI		64
 
 struct dwpcie_msi {
	int			(*dm_func)(void *);
@@ -223,6 +223,7 @@ struct dwpcie_msi {
	int			dm_ipl;
	int			dm_flags;
	int			dm_vec;
+	int			dm_nvec;
	struct evcount		dm_count;
	char			*dm_name;
 };
@@ -280,8 +281,11 @@ struct dwpcie_softc {
	struct interrupt_controller sc_ic;
	TAILQ_HEAD(,dwpcie_intx) sc_intx[4];
 
+	void			*sc_msi_ih[2];
	uint64_t		sc_msi_addr;
-	struct dwpcie_msi	sc_msi[DWPCIE_NUM_MSI];
+	uint64_t		sc_msi_mask;
+	struct dwpcie_msi	sc_msi[DWPCIE_MAX_MSI];
+	int			sc_num_msi;
 };
 
 struct dwpcie_intr_handle {
@@ -727,12 +731,20 @@ dwpcie_attach_deferred(struct device *self)
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
 
	if (OF_is_compatible(sc->sc_node, "baikal,bm1000-pcie") ||
	    OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie") ||
	    OF_getproplen(sc->sc_node, "msi-map") > 0 ||
	    sc->sc_msi_addr)
		pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;
-	if (OF_getproplen(sc->sc_node, "msi-map") > 0)
+
+	/*
+	 * Only support mutiple MSI vectors if we have enough MSI
+	 * interrupts (or are using an external interrupt controller
+	 * that hopefully suppors plenty of MSI interripts).
+	 */
+	if (OF_getproplen(sc->sc_node, "msi-map") > 0 ||
+	    sc->sc_num_msi > 32)
		pba.pba_flags |= PCI_FLAGS_MSIVEC_ENABLED;
 
	pci_dopm = 1;
@@ -786,23 +798,22 @@ dwpcie_link_config(struct dwpcie_softc *sc)
 }
 
 int
-dwpcie_msi_intr(void *arg)
+dwpcie_msi_intr(struct dwpcie_softc *sc, int idx)
 {
-	struct dwpcie_softc *sc = arg;
	struct dwpcie_msi *dm;
	uint32_t status;
	int vec, s;
 
-	status = HREAD4(sc, PCIE_MSI_INTR0_STATUS);
+	status = HREAD4(sc, PCIE_MSI_INTR_STATUS(idx));
	if (status == 0)
		return 0;
 
-	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, status);
+	HWRITE4(sc, PCIE_MSI_INTR_STATUS(idx), status);
	while (status) {
		vec = ffs(status) - 1;
		status &= ~(1U << vec);
 
-		dm = &sc->sc_msi[vec];
+		dm = &sc->sc_msi[idx * 32 + vec];
		if (dm->dm_func == NULL)
			continue;
 
@@ -819,6 +830,18 @@ dwpcie_msi_intr(void *arg)
	return 1;
 }
 
+int
+dwpcie_msi0_intr(void *arg)
+{
+	return dwpcie_msi_intr(arg, 0);
+}
+
+int
+dwpcie_msi1_intr(void *arg)
+{
+	return dwpcie_msi_intr(arg, 1);
+}
+
 int
 dwpcie_msi_init(struct dwpcie_softc *sc)
 {
@@ -826,6 +849,7 @@ dwpcie_msi_init(struct dwpcie_softc *sc)
	bus_dmamap_t map;
	uint64_t addr;
	int error, rseg;
+	int idx;
 
	/*
	 * Allocate some DMA memory such that we have a "safe" target
@@ -861,19 +885,46 @@
	bus_dmamap_unload(sc->sc_dmat, map);
	bus_dmamap_destroy(sc->sc_dmat, map);
 
-	/* Enable, mask and clear all MSIs. */
-	HWRITE4(sc, PCIE_MSI_INTR0_ENABLE, 0xffffffff);
-	HWRITE4(sc, PCIE_MSI_INTR0_MASK, 0xffffffff);
-	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, 0xffffffff);
+	/*
+	 * See if the device tree indicates that the hardware supports
+	 * more than 32 vectors.  Some hardware supports more than 64,
+	 * but 64 is good enough for now.
+	 */
+	idx = OF_getindex(sc->sc_node, "msi1", "interrupt-names");
+	if (idx == -1)
+		sc->sc_num_msi = 32;
+	else
+		sc->sc_num_msi = 64;
+	KASSERT(sc->sc_num_msi <= DWPCIE_MAX_MSI);
 
-	KASSERT(sc->sc_ih == NULL);
-	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_BIO | IPL_MPSAFE,
-	    dwpcie_msi_intr, sc, sc->sc_dev.dv_xname);
-	if (sc->sc_ih == NULL) {
+	/* Enable, mask and clear all MSIs. */
+	for (idx = 0; idx < sc->sc_num_msi / 32; idx++) {
+		HWRITE4(sc, PCIE_MSI_INTR_ENABLE(idx), 0xffffffff);
+		HWRITE4(sc, PCIE_MSI_INTR_MASK(idx), 0xffffffff);
+		HWRITE4(sc, PCIE_MSI_INTR_STATUS(idx), 0xffffffff);
+	}
+
+	idx = OF_getindex(sc->sc_node, "msi0", "interrupt-names");
+	if (idx == -1)
+		idx = 0;
+
+	sc->sc_msi_ih[0] = fdt_intr_establish_idx(sc->sc_node, idx,
+	    IPL_BIO | IPL_MPSAFE, dwpcie_msi0_intr, sc, sc->sc_dev.dv_xname);
+	if (sc->sc_msi_ih[0] == NULL) {
		bus_dmamem_free(sc->sc_dmat, &seg, 1);
		return EINVAL;
	}
 
+	idx = OF_getindex(sc->sc_node, "msi1", "interrupt-names");
+	if (idx == -1)
+		goto finish;
+
+	sc->sc_msi_ih[1] = fdt_intr_establish_idx(sc->sc_node, idx,
+	    IPL_BIO | IPL_MPSAFE, dwpcie_msi1_intr, sc, sc->sc_dev.dv_xname);
+	if (sc->sc_msi_ih[1] == NULL)
+		sc->sc_num_msi = 32;
+
+finish:
	/*
	 * Hold on to the DMA memory such that nobody can use it to
	 * actually do DMA transfers.
@@ -1781,31 +1832,81 @@ dwpcie_intr_string(void *v, pci_intr_handle_t ih)
 }
 
 struct dwpcie_msi *
-dwpcie_msi_establish(struct dwpcie_softc *sc, int level,
-    int (*func)(void *), void *arg, char *name)
+dwpcie_msi_establish(struct dwpcie_softc *sc, pci_intr_handle_t *ihp,
+    int level, int (*func)(void *), void *arg, char *name)
 {
+	pci_chipset_tag_t pc = ihp->ih_pc;
+	pcitag_t tag = ihp->ih_tag;
	struct dwpcie_msi *dm;
-	int vec;
+	uint64_t msi_mask;
+	int vec = ihp->ih_intrpin;
+	int base, mme, nvec, off;
+	pcireg_t reg;
 
-	for (vec = 0; vec < DWPCIE_NUM_MSI; vec++) {
-		dm = &sc->sc_msi[vec];
-		if (dm->dm_func == NULL)
-			break;
+	if (ihp->ih_type == PCI_MSI) {
+		if (pci_get_capability(pc, tag, PCI_CAP_MSI, &off, &reg) == 0)
+			panic("%s: no msi capability", __func__);
+
+		reg = pci_conf_read(ihp->ih_pc, ihp->ih_tag, off);
+		mme = ((reg & PCI_MSI_MC_MME_MASK) >> PCI_MSI_MC_MME_SHIFT);
+		if (vec >= (1 << mme))
+			return NULL;
+		if (reg & PCI_MSI_MC_C64)
+			base = pci_conf_read(pc, tag, off + PCI_MSI_MD64);
+		else
+			base = pci_conf_read(pc, tag, off + PCI_MSI_MD32);
+	} else {
+		mme = 0;
+		base = 0;
	}
-	if (vec == DWPCIE_NUM_MSI)
+
+	if (vec == 0) {
+		/*
+		 * Pre-allocate all the requested vectors.  Remember
+		 * the number of requested vectors such that we can
+		 * deallocate them in one go.
+		 */
+		msi_mask = (1ULL << (1 << mme)) - 1;
+		while (vec <= sc->sc_num_msi - (1 << mme)) {
+			if ((sc->sc_msi_mask & (msi_mask << vec)) == 0) {
+				sc->sc_msi_mask |= (msi_mask << vec);
+				break;
+			}
+			vec += (1 << mme);
+		}
+		base = vec;
+		nvec = (1 << mme);
+	} else {
+		KASSERT(ihp->ih_type == PCI_MSI);
+		vec += base;
+		nvec = 0;
+	}
+
+	if (vec >= sc->sc_num_msi)
		return NULL;
 
+	if (ihp->ih_type == PCI_MSI) {
+		if (reg & PCI_MSI_MC_C64)
+			pci_conf_write(pc, tag, off + PCI_MSI_MD64, base);
+		else
+			pci_conf_write(pc, tag, off + PCI_MSI_MD32, base);
+	}
+
+	dm = &sc->sc_msi[vec];
+	KASSERT(dm->dm_func == NULL);
+
	dm->dm_func = func;
	dm->dm_arg = arg;
	dm->dm_ipl = level & IPL_IRQMASK;
	dm->dm_flags = level & IPL_FLAGMASK;
	dm->dm_vec = vec;
+	dm->dm_nvec = nvec;
	dm->dm_name = name;
	if (name != NULL)
		evcount_attach(&dm->dm_count, name, &dm->dm_vec);
 
	/* Unmask the MSI. */
-	HCLR4(sc, PCIE_MSI_INTR0_MASK, (1U << vec));
+	HCLR4(sc, PCIE_MSI_INTR_MASK(vec / 32), (1U << (vec % 32)));
 
	return dm;
 }
@@ -1813,12 +1914,21 @@ dwpcie_msi_establish(struct dwpcie_softc *sc, int level,
 void
 dwpcie_msi_disestablish(struct dwpcie_softc *sc, struct dwpcie_msi *dm)
 {
+	uint64_t msi_mask = (1ULL << dm->dm_nvec) - 1;
+
	/* Mask the MSI. */
-	HSET4(sc, PCIE_MSI_INTR0_MASK, (1U << dm->dm_vec));
+	HSET4(sc, PCIE_MSI_INTR_MASK(dm->dm_vec / 32),
+	    (1U << (dm->dm_vec % 32)));
 
	if (dm->dm_name)
		evcount_detach(&dm->dm_count);
	dm->dm_func = NULL;
+
+	/*
+	 * Unallocate all allocated vetcors if this is the first
+	 * vector for the device.
+	 */
+	sc->sc_msi_mask &= ~(msi_mask << dm->dm_vec);
 }
 
 void *
@@ -1839,9 +1949,7 @@ dwpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
	uint64_t addr, data;
 
	if (sc->sc_msi_addr) {
-		if (ih.ih_type == PCI_MSI && ih.ih_intrpin > 0)
-			return NULL;
-		dm = dwpcie_msi_establish(sc, level, func, arg, name);
+		dm = dwpcie_msi_establish(sc, &ih, level, func, arg, name);
		if (dm == NULL)
			return NULL;
		addr = sc->sc_msi_addr;

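Note on dwpcie_msi_establish above: with PCI multiple-message MSI a device is granted a naturally aligned, power-of-two block of 1 << MME vectors (MME is the Multiple Message Enable field, 0..5), so the new code claims whole blocks in the sc_msi_mask bitmap and writes the block's base into the MSI data register. A standalone sketch of that block search under the same alignment rule — the helper name is hypothetical, not part of the driver:

#include <stdint.h>

/*
 * Claim a naturally aligned block of (1 << mme) vectors in *mask.
 * mme is the PCI MSI Multiple Message Enable value (0..5), so the
 * block is 1..32 bits wide; num_msi is at most 64 here.
 * Returns the first vector of the block, or -1 if none is free.
 */
int
msi_claim_block(uint64_t *mask, int num_msi, int mme)
{
	uint64_t block = (1ULL << (1 << mme)) - 1;
	int vec;

	for (vec = 0; vec <= num_msi - (1 << mme); vec += (1 << mme)) {
		if ((*mask & (block << vec)) == 0) {
			*mask |= block << vec;	/* claim the whole block */
			return vec;
		}
	}
	return -1;
}

Stepping by (1 << mme) keeps every candidate block naturally aligned, which is exactly why dwpcie_msi_disestablish can later clear the whole block with one mask operation.
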
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_iavf.c,v 1.13 2024/05/24 06:02:53 jsg Exp $ */
+/* $OpenBSD: if_iavf.c,v 1.14 2024/07/09 16:04:15 jmatthew Exp $ */
 
 /*
  * Copyright (c) 2013-2015, Intel Corporation
@@ -954,7 +954,7 @@ iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
 {
	struct iavf_softc *sc = ifp->if_softc;
 
-	NET_ASSERT_LOCKED();
+	KERNEL_ASSERT_LOCKED();
 
	ifm->ifm_status = sc->sc_media_status;
	ifm->ifm_active = sc->sc_media_active;

@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm.h,v 1.4 2024/01/11 17:13:48 jan Exp $ */
+/* $OpenBSD: vmm.h,v 1.5 2024/07/09 09:31:37 dv Exp $ */
 /*
  * Copyright (c) 2014-2023 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -20,9 +20,19 @@
 
 #include <uvm/uvm_extern.h>
 
+#include <machine/vmmvar.h>
+
 #ifndef DEV_VMM_H
 #define DEV_VMM_H
 
+#define VMM_MAX_MEM_RANGES	16
+#define VMM_MAX_DISKS_PER_VM	4
+#define VMM_MAX_NAME_LEN	64
+#define VMM_MAX_VCPUS		512
+#define VMM_MAX_VCPUS_PER_VM	64
+#define VMM_MAX_VM_MEM_SIZE	128L * 1024 * 1024 * 1024
+#define VMM_MAX_NICS_PER_VM	4
+
 struct vm_mem_range {
	paddr_t vmr_gpa;
	vaddr_t vmr_va;
@@ -83,6 +93,21 @@ struct vm_sharemem_params {
	struct vm_mem_range	vsp_memranges[VMM_MAX_MEM_RANGES];
 };
 
+struct vm_run_params {
+	/* Input parameters to VMM_IOC_RUN */
+	uint32_t	vrp_vm_id;
+	uint32_t	vrp_vcpu_id;
+	struct vcpu_inject_event	vrp_inject;
+	uint8_t		vrp_intr_pending;	/* Additional intrs pending? */
+
+	/* Input/output parameter to VMM_IOC_RUN */
+	struct vm_exit	*vrp_exit;	/* updated exit data */
+
+	/* Output parameter from VMM_IOC_RUN */
+	uint16_t	vrp_exit_reason;	/* exit reason */
+	uint8_t		vrp_irqready;	/* ready for IRQ on entry */
+};
+
 /* IOCTL definitions */
 #define VMM_IOC_CREATE _IOWR('V', 1, struct vm_create_params) /* Create VM */
 #define VMM_IOC_RUN _IOWR('V', 2, struct vm_run_params) /* Run VCPU */
@@ -202,6 +227,7 @@ int vm_terminate(struct vm_terminate_params *);
 int vm_resetcpu(struct vm_resetcpu_params *);
 int vcpu_must_stop(struct vcpu *);
 int vm_share_mem(struct vm_sharemem_params *, struct proc *);
+int vm_run(struct vm_run_params *);
 
 #ifdef VMM_DEBUG
 void dump_vcpu(struct vcpu *);

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_resource.c,v 1.85 2024/07/08 13:17:12 claudio Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.86 2024/07/09 15:20:15 claudio Exp $ */
 /* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
 
 /*-
@@ -432,7 +432,6 @@ void
 tuagg_add_process(struct process *pr, struct proc *p)
 {
	MUTEX_ASSERT_LOCKED(&pr->ps_mtx);
-	splassert(IPL_STATCLOCK);
	KASSERT(curproc == p || p->p_stat == SDEAD);
 
	tu_enter(&pr->ps_tu);

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.99 2024/07/08 16:15:42 mpi Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.100 2024/07/09 08:44:36 claudio Exp $ */
 /*
  * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
  *
@@ -261,8 +261,9 @@ sched_toidle(void)
	idle->p_stat = SRUN;
 
	uvmexp.swtch++;
-	TRACEPOINT(sched, off__cpu, idle->p_tid + THREAD_PID_OFFSET,
-	    idle->p_p->ps_pid);
+	if (curproc != NULL)
+		TRACEPOINT(sched, off__cpu, idle->p_tid + THREAD_PID_OFFSET,
+		    idle->p_p->ps_pid);
	cpu_switchto(NULL, idle);
	panic("cpu_switchto returned");
 }

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.330 2024/06/03 12:48:25 claudio Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.331 2024/07/09 09:22:50 claudio Exp $ */
 /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
 
 /*
@@ -1065,6 +1065,73 @@ ptsignal(struct proc *p, int signum, enum signal_type type)
 
	switch (p->p_stat) {
 
+	case SSTOP:
+		/*
+		 * If traced process is already stopped,
+		 * then no further action is necessary.
+		 */
+		if (pr->ps_flags & PS_TRACED)
+			goto out;
+
+		/*
+		 * Kill signal always sets processes running.
+		 */
+		if (signum == SIGKILL) {
+			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
+			goto runfast;
+		}
+
+		if (prop & SA_CONT) {
+			/*
+			 * If SIGCONT is default (or ignored), we continue the
+			 * process but don't leave the signal in p_siglist, as
+			 * it has no further action. If SIGCONT is held, we
+			 * continue the process and leave the signal in
+			 * p_siglist. If the process catches SIGCONT, let it
+			 * handle the signal itself. If it isn't waiting on
+			 * an event, then it goes back to run state.
+			 * Otherwise, process goes back to sleep state.
+			 */
+			atomic_setbits_int(&p->p_flag, P_CONTINUED);
+			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
+			wakeparent = 1;
+			if (action == SIG_DFL)
+				mask = 0;
+			if (action == SIG_CATCH)
+				goto runfast;
+			if (p->p_wchan == NULL)
+				goto run;
+			atomic_clearbits_int(&p->p_flag, P_WSLEEP);
+			p->p_stat = SSLEEP;
+			goto out;
+		}
+
+		/*
+		 * Defer further processing for signals which are held,
+		 * except that stopped processes must be continued by SIGCONT.
+		 */
+		if (action == SIG_HOLD)
+			goto out;
+
+		if (prop & SA_STOP) {
+			/*
+			 * Already stopped, don't need to stop again.
+			 * (If we did the shell could get confused.)
+			 */
+			mask = 0;
+			goto out;
+		}
+
+		/*
+		 * If process is sleeping interruptibly, then simulate a
+		 * wakeup so that when it is continued, it will be made
+		 * runnable and can look at the signal. But don't make
+		 * the process runnable, leave it stopped.
+		 */
+		if (p->p_flag & P_SINTR)
+			unsleep(p);
+		goto out;
+
	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
@@ -1143,73 +1210,6 @@ ptsignal(struct proc *p, int signum, enum signal_type type)
		goto runfast;
		/* NOTREACHED */
 
-	case SSTOP:
-		/*
-		 * If traced process is already stopped,
-		 * then no further action is necessary.
-		 */
-		if (pr->ps_flags & PS_TRACED)
-			goto out;
-
-		/*
-		 * Kill signal always sets processes running.
-		 */
-		if (signum == SIGKILL) {
-			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
-			goto runfast;
-		}
-
-		if (prop & SA_CONT) {
-			/*
-			 * If SIGCONT is default (or ignored), we continue the
-			 * process but don't leave the signal in p_siglist, as
-			 * it has no further action. If SIGCONT is held, we
-			 * continue the process and leave the signal in
-			 * p_siglist. If the process catches SIGCONT, let it
-			 * handle the signal itself. If it isn't waiting on
-			 * an event, then it goes back to run state.
-			 * Otherwise, process goes back to sleep state.
-			 */
-			atomic_setbits_int(&p->p_flag, P_CONTINUED);
-			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
-			wakeparent = 1;
-			if (action == SIG_DFL)
-				mask = 0;
-			if (action == SIG_CATCH)
-				goto runfast;
-			if (p->p_wchan == NULL)
-				goto run;
-			atomic_clearbits_int(&p->p_flag, P_WSLEEP);
-			p->p_stat = SSLEEP;
-			goto out;
-		}
-
-		/*
-		 * Defer further processing for signals which are held,
-		 * except that stopped processes must be continued by SIGCONT.
-		 */
-		if (action == SIG_HOLD)
-			goto out;
-
-		if (prop & SA_STOP) {
-			/*
-			 * Already stopped, don't need to stop again.
-			 * (If we did the shell could get confused.)
-			 */
-			mask = 0;
-			goto out;
-		}
-
-		/*
-		 * If process is sleeping interruptibly, then simulate a
-		 * wakeup so that when it is continued, it will be made
-		 * runnable and can look at the signal. But don't make
-		 * the process runnable, leave it stopped.
-		 */
-		if (p->p_flag & P_SINTR)
-			unsleep(p);
-		goto out;
-
	case SONPROC:
		if (action == SIG_HOLD)
			goto out;
@@ -2160,8 +2160,12 @@ single_thread_set(struct proc *p, int flags)
		SCHED_LOCK();
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
+		case SIDL:
+		case SDEAD:
+		case SSTOP:
+			if (mode == SINGLE_EXIT) {
+				unsleep(q);
+				setrunnable(q);
+			} else
+				--pr->ps_singlecnt;
+			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
@@ -2177,17 +2181,12 @@ single_thread_set(struct proc *p, int flags)
				setrunnable(q);
			}
			break;
-		case SSTOP:
-			if (mode == SINGLE_EXIT) {
-				unsleep(q);
-				setrunnable(q);
-			} else
-				--pr->ps_singlecnt;
-			break;
		case SONPROC:
			signotify(q);
-			/* FALLTHROUGH */
+			break;
		case SRUN:
-		case SIDL:
-		case SDEAD:
			break;
		}
		SCHED_UNLOCK();

@@ -1,4 +1,4 @@
-/* $OpenBSD: sysv_sem.c,v 1.63 2022/09/28 13:21:13 mbuhl Exp $ */
+/* $OpenBSD: sysv_sem.c,v 1.64 2024/07/09 04:42:48 jsg Exp $ */
 /* $NetBSD: sysv_sem.c,v 1.26 1996/02/09 19:00:25 christos Exp $ */
 
 /*
@@ -564,7 +564,7 @@ sys_semop(struct proc *p, void *v, register_t *retval)
	sops = mallocarray(nsops, sizeof(struct sembuf), M_SEM, M_WAITOK);
	error = copyin(SCARG(uap, sops), sops, nsops * sizeof(struct sembuf));
	if (error != 0) {
-		DPRINTF(("error = %d from copyin(%p, %p, %u)\n", error,
+		DPRINTF(("error = %d from copyin(%p, %p, %zu)\n", error,
		    SCARG(uap, sops), &sops, nsops * sizeof(struct sembuf)));
		goto done2;
	}
@@ -593,7 +593,7 @@ sys_semop(struct proc *p, void *v, register_t *retval)
 
		semptr = &semaptr->sem_base[sopptr->sem_num];
 
-		DPRINTF(("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
+		DPRINTF(("semop: semaptr=%p, sem_base=%p, semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
		    semaptr, semaptr->sem_base, semptr,
		    sopptr->sem_num, semptr->semval, sopptr->sem_op,
		    (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait"));
@@ -634,7 +634,7 @@ sys_semop(struct proc *p, void *v, register_t *retval)
			/*
			 * No ... rollback anything that we've already done
			 */
-			DPRINTF(("semop: rollback 0 through %d\n", i - 1));
+			DPRINTF(("semop: rollback 0 through %zu\n", i - 1));
			for (j = 0; j < i; j++)
				semaptr->sem_base[sops[j].sem_num].semval -=
				    sops[j].sem_op;

@@ -434,6 +434,9 @@ typedef uLong FAR uLongf;
 #ifdef _KERNEL
 #  define Z_HAVE_UNISTD_H
 #endif
+#ifdef _STANDALONE
+#  define z_off_t long
+#endif
 
 #ifdef HAVE_UNISTD_H    /* may be set to #if 1 by ./configure */
 #  define Z_HAVE_UNISTD_H
@@ -508,7 +511,7 @@ typedef uLong FAR uLongf;
 #endif
 
 #ifndef z_off_t
-#  define z_off_t long
+#  define z_off_t long long
 #endif
 
 #if !defined(_WIN32) && defined(Z_LARGE64)

@@ -1,4 +1,4 @@
-/* $OpenBSD: ip6_forward.c,v 1.120 2024/07/04 12:50:08 bluhm Exp $ */
+/* $OpenBSD: ip6_forward.c,v 1.121 2024/07/09 09:33:13 bluhm Exp $ */
 /* $KAME: ip6_forward.c,v 1.75 2001/06/29 12:42:13 jinmei Exp $ */
 
 /*
@@ -89,8 +89,16 @@ ip6_forward(struct mbuf *m, struct route *ro, int flags)
	struct rtentry *rt;
	struct sockaddr *dst;
	struct ifnet *ifp = NULL;
-	int error = 0, type = 0, code = 0, destmtu = 0;
+	u_int rtableid = m->m_pkthdr.ph_rtableid;
+	u_int ifidx = m->m_pkthdr.ph_ifidx;
+	u_int8_t loopcnt = m->m_pkthdr.ph_loopcnt;
+	u_int icmp_len;
+	char icmp_buf[MHLEN];
+	CTASSERT(sizeof(struct ip6_hdr) + sizeof(struct tcphdr) +
+	    MAX_TCPOPTLEN <= sizeof(icmp_buf));
+	u_short mflags, pfflags;
	struct mbuf *mcopy;
+	int error = 0, type = 0, code = 0, destmtu = 0;
 #ifdef IPSEC
	struct tdb *tdb = NULL;
 #endif /* IPSEC */
@@ -117,9 +125,7 @@ ip6_forward(struct mbuf *m, struct route *ro, int flags)
			log(LOG_DEBUG,
			    "cannot forward "
			    "from %s to %s nxt %d received on interface %u\n",
-			    src6, dst6,
-			    ip6->ip6_nxt,
-			    m->m_pkthdr.ph_ifidx);
+			    src6, dst6, ip6->ip6_nxt, ifidx);
		}
		m_freem(m);
		goto done;
@@ -137,12 +143,21 @@ ip6_forward(struct mbuf *m, struct route *ro, int flags)
	 * size of IPv6 + ICMPv6 headers) bytes of the packet in case
	 * we need to generate an ICMP6 message to the src.
-	 * Thanks to M_EXT, in most cases copy will not occur.
+	 * For small packets copy original onto stack instead of mbuf.
	 *
	 * It is important to save it before IPsec processing as IPsec
	 * processing may modify the mbuf.
	 */
-	mcopy = m_copym(m, 0, imin(m->m_pkthdr.len, ICMPV6_PLD_MAXLEN),
-	    M_NOWAIT);
+	icmp_len = min(m->m_pkthdr.len, ICMPV6_PLD_MAXLEN);
+	if (icmp_len <= sizeof(icmp_buf)) {
+		mflags = m->m_flags;
+		pfflags = m->m_pkthdr.pf.flags;
+		m_copydata(m, 0, icmp_len, icmp_buf);
+		mcopy = NULL;
+	} else {
+		mcopy = m_copym(m, 0, icmp_len, M_NOWAIT);
+		icmp_len = 0;
+	}
 
 #if NPF > 0
 reroute:
@@ -174,12 +189,10 @@ reroute:
		    m->m_pkthdr.ph_rtableid);
		if (rt == NULL) {
			ip6stat_inc(ip6s_noroute);
-			if (mcopy != NULL) {
-				icmp6_error(mcopy, ICMP6_DST_UNREACH,
-				    ICMP6_DST_UNREACH_NOROUTE, 0);
-			}
+			type = ICMP6_DST_UNREACH;
+			code = ICMP6_DST_UNREACH_NOROUTE;
			m_freem(m);
-			goto done;
+			goto icmperror;
		}
		dst = &ro->ro_dstsa;
 
@@ -190,7 +203,7 @@ reroute:
	 * unreachable error with Code 2 (beyond scope of source address).
	 * [draft-ietf-ipngwg-icmp-v3-00.txt, Section 3.1]
	 */
-	if (in6_addr2scopeid(m->m_pkthdr.ph_ifidx, &ip6->ip6_src) !=
+	if (in6_addr2scopeid(ifidx, &ip6->ip6_src) !=
	    in6_addr2scopeid(rt->rt_ifidx, &ip6->ip6_src)) {
		time_t uptime;
 
@@ -205,15 +218,12 @@ reroute:
			log(LOG_DEBUG,
			    "cannot forward "
			    "src %s, dst %s, nxt %d, rcvif %u, outif %u\n",
-			    src6, dst6,
-			    ip6->ip6_nxt,
-			    m->m_pkthdr.ph_ifidx, rt->rt_ifidx);
+			    src6, dst6, ip6->ip6_nxt, ifidx, rt->rt_ifidx);
		}
-		if (mcopy != NULL)
-			icmp6_error(mcopy, ICMP6_DST_UNREACH,
-			    ICMP6_DST_UNREACH_BEYONDSCOPE, 0);
+		type = ICMP6_DST_UNREACH;
+		code = ICMP6_DST_UNREACH_BEYONDSCOPE;
		m_freem(m);
-		goto done;
+		goto icmperror;
	}
 
 #ifdef IPSEC
@@ -248,7 +258,7 @@ reroute:
		m_freem(m);
		goto freecopy;
	}
-	if (rt->rt_ifidx == m->m_pkthdr.ph_ifidx &&
+	if (rt->rt_ifidx == ifidx &&
	    ip6_sendredirects && !ISSET(flags, IPV6_REDIRECT) &&
	    (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0) {
		if ((ifp->if_flags & IFF_POINTOPOINT) &&
@@ -268,11 +278,10 @@ reroute:
			 * type/code is based on suggestion by Rich Draves.
			 * not sure if it is the best pick.
			 */
-			if (mcopy != NULL)
-				icmp6_error(mcopy, ICMP6_DST_UNREACH,
-				    ICMP6_DST_UNREACH_ADDR, 0);
+			type = ICMP6_DST_UNREACH;
+			code = ICMP6_DST_UNREACH_ADDR;
			m_freem(m);
-			goto done;
+			goto icmperror;
		}
		type = ND_REDIRECT;
	}
@@ -332,27 +341,41 @@ reroute:
	if (error || m == NULL)
		goto senderr;
 
-	if (mcopy != NULL)
-		icmp6_error(mcopy, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
+	type = ICMP6_PACKET_TOO_BIG;
+	destmtu = ifp->if_mtu;
	m_freem(m);
-	goto done;
+	goto icmperror;
 
 senderr:
-	if (mcopy == NULL)
+	if (mcopy == NULL && icmp_len == 0)
		goto done;
 
	switch (error) {
	case 0:
		if (type == ND_REDIRECT) {
-			icmp6_redirect_output(mcopy, rt);
-			ip6stat_inc(ip6s_redirectsent);
+			if (icmp_len != 0) {
+				mcopy = m_gethdr(M_DONTWAIT, MT_DATA);
+				if (mcopy == NULL)
+					goto done;
+				mcopy->m_len = mcopy->m_pkthdr.len = icmp_len;
+				mcopy->m_flags |= (mflags & M_COPYFLAGS);
+				mcopy->m_pkthdr.ph_rtableid = rtableid;
+				mcopy->m_pkthdr.ph_ifidx = ifidx;
+				mcopy->m_pkthdr.ph_loopcnt = loopcnt;
+				mcopy->m_pkthdr.pf.flags |=
+				    (pfflags & PF_TAG_GENERATED);
+				memcpy(mcopy->m_data, icmp_buf, icmp_len);
+			}
+			if (mcopy != NULL) {
+				icmp6_redirect_output(mcopy, rt);
+				ip6stat_inc(ip6s_redirectsent);
+			}
			goto done;
		}
		goto freecopy;
 
	case EMSGSIZE:
		type = ICMP6_PACKET_TOO_BIG;
		code = 0;
		if (rt != NULL) {
			if (rt->rt_mtu) {
				destmtu = rt->rt_mtu;
@@ -390,7 +413,21 @@ senderr:
		code = ICMP6_DST_UNREACH_ADDR;
		break;
	}
-	icmp6_error(mcopy, type, code, destmtu);
+icmperror:
+	if (icmp_len != 0) {
+		mcopy = m_gethdr(M_DONTWAIT, MT_DATA);
+		if (mcopy == NULL)
+			goto done;
+		mcopy->m_len = mcopy->m_pkthdr.len = icmp_len;
+		mcopy->m_flags |= (mflags & M_COPYFLAGS);
+		mcopy->m_pkthdr.ph_rtableid = rtableid;
+		mcopy->m_pkthdr.ph_ifidx = ifidx;
+		mcopy->m_pkthdr.ph_loopcnt = loopcnt;
+		mcopy->m_pkthdr.pf.flags |= (pfflags & PF_TAG_GENERATED);
+		memcpy(mcopy->m_data, icmp_buf, icmp_len);
+	}
+	if (mcopy != NULL)
+		icmp6_error(mcopy, type, code, destmtu);
	goto done;
 
 freecopy:

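Note on the ip6_forward rework above: the common path now stashes the leading ICMPV6_PLD_MAXLEN-bounded bytes in the on-stack icmp_buf and builds an mbuf only when an ICMPv6 error is actually generated, saving an mbuf allocation per forwarded packet. A reduced userland sketch of the same save-now/materialize-later pattern — the struct, helper names, and buffer size are placeholders, not kernel API:

#include <string.h>

struct hdr_save {
	unsigned int	len;
	char		buf[128];	/* enough for a v6 + TCP header */
};

/* fast path: copy the header bytes, defer any allocation */
static void
save_header(struct hdr_save *hs, const void *pkt, unsigned int pktlen)
{
	hs->len = pktlen < sizeof(hs->buf) ? pktlen : sizeof(hs->buf);
	memcpy(hs->buf, pkt, hs->len);
}

/* error path: only now pay for materializing the saved bytes */
static unsigned int
restore_header(const struct hdr_save *hs, void *dst, unsigned int dstlen)
{
	unsigned int n = hs->len < dstlen ? hs->len : dstlen;

	memcpy(dst, hs->buf, n);
	return n;
}

The kernel version must also restore packet-header metadata (rtableid, ifidx, loopcnt, pf flags), which is why the diff saves those fields alongside the raw bytes.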