sync
commit a2dd1eda92
parent 451579e149
89 changed files with 1343 additions and 775 deletions
@@ -1,4 +1,4 @@
-/* $OpenBSD: dwmshc.c,v 1.4 2023/04/19 02:01:02 dlg Exp $ */
+/* $OpenBSD: dwmshc.c,v 1.5 2023/06/20 09:26:36 kettenis Exp $ */
 
 /*
  * Copyright (c) 2023 David Gwynne <dlg@openbsd.org>
@@ -98,7 +98,7 @@
 #define EMMC_DLL_RXCLK_RX_CLK_OUT_SEL (1U << 27)
 #define EMMC_DLL_RXCLK_RX_CLK_CHANGE_WINDOW (1U << 28)
 #define EMMC_DLL_RXCLK_RX_CLK_SRC_SEL (1U << 29)
-#define EMMC_DLL_TXCLK 0x804
+#define EMMC_DLL_TXCLK 0x808
 #define EMMC_DLL_TXCLK_TX_TAP_NUM_SHIFT 0
 #define EMMC_DLL_TXCLK_TX_TAP_NUM_MASK 0x1f
 #define EMMC_DLL_TXCLK_TX_TAP_VALUE_SHIFT 8

@@ -267,7 +267,7 @@ typedef enum {
 #define AAC_ERROR_FIB_DEALLOCATION_FAILED 0x08
 
 /*
- * Adapter Init Structure: this is passed to the adapter with the
+ * Adapter Init Structure: this is passed to the adapter with the
  * AAC_MONKER_INITSTRUCT command to point it at our control structures.
  */
 struct aac_adapter_init {
@@ -423,7 +423,7 @@ typedef enum {
 CPU_MIPS,
 CPU_XSCALE,
 CPU__last
-} AAC_CpuType;
+} AAC_CpuType;
 
 typedef enum {
 CPUI960_JX = 1,
@@ -481,7 +481,7 @@ typedef enum {
 * XXX the aac-2622 with no battery present reports PLATFORM_BAT_OPT_PRESENT
 */
 typedef enum
-{
+{
 PLATFORM_BAT_REQ_PRESENT = 1, /* BATTERY REQUIRED AND PRESENT */
 PLATFORM_BAT_REQ_NOTPRESENT, /* BATTERY REQUIRED AND NOT PRESENT */
 PLATFORM_BAT_OPT_PRESENT, /* BATTERY OPTIONAL AND PRESENT */
@@ -489,9 +489,9 @@ typedef enum
 PLATFORM_BAT_NOT_SUPPORTED /* BATTERY NOT SUPPORTED */
 } AAC_BatteryPlatform;
 
-/*
+/*
 * options supported by this board
-* there has to be a one to one mapping of these defines and the ones in
+* there has to be a one to one mapping of these defines and the ones in
 * fsaapi.h, search for FSA_SUPPORT_SNAPSHOT
 */
 #define AAC_SUPPORTED_SNAPSHOT 0x01
@@ -508,7 +508,7 @@ typedef enum
 #define AAC_SUPPORTED_ALARM 0x800
 #define AAC_SUPPORTED_NONDASD 0x1000
 
-/*
+/*
 * Structure used to respond to a RequestAdapterInfo fib.
 */
 struct aac_adapter_info {
@@ -564,7 +564,7 @@ struct aac_adapter_info {
 #define AAC_KERNEL_PANIC 0x00000100
 
 /*
- * Data types relating to control and monitoring of the NVRAM/WriteCache
+ * Data types relating to control and monitoring of the NVRAM/WriteCache
  * subsystem.
  */
 
@@ -732,7 +732,7 @@ typedef enum {
 AifEnGeneric = 1, /* Generic notification */
 AifEnTaskComplete, /* Task has completed */
 AifEnConfigChange, /* Adapter config change occurred */
-AifEnContainerChange, /* Adapter specific container
+AifEnContainerChange, /* Adapter specific container
 * configuration change */
 AifEnDeviceFailure, /* SCSI device failed */
 AifEnMirrorFailover, /* Mirror failover started */
@@ -832,7 +832,7 @@ struct aac_AifEventNotify {
 /*
 * Adapter Initiated FIB command structures. Start with the adapter
 * initiated FIBs that really come from the adapter, and get responded
-* to by the host.
+* to by the host.
 */
 #define AAC_AIF_REPORT_MAX_SIZE 64
 
@@ -1378,7 +1378,7 @@ enum {
 /*
 * The adapter can request the host print a message by setting the
 * DB_PRINTF flag in DOORBELL0. The driver responds by collecting the
-* message from the printf buffer, clearing the DB_PRINTF flag in
+* message from the printf buffer, clearing the DB_PRINTF flag in
 * DOORBELL0 and setting it in DOORBELL1.
 * (ODBR and IDBR respectively for the i960Rx adapters)
 */

@@ -127,10 +127,10 @@ struct aac_softc;
 #define AAC_SYNC_DELAY 20000
 
 /*
- * The firmware interface allows for a 16-bit s/g list length. We limit
+ * The firmware interface allows for a 16-bit s/g list length. We limit
  * ourselves to a reasonable maximum and ensure alignment.
  */
-#define AAC_MAXSGENTRIES 64 /* max S/G entries, limit 65535 */
+#define AAC_MAXSGENTRIES 64 /* max S/G entries, limit 65535 */
 
 /*
  * We gather a number of adapter-visible items into a single structure.
@@ -157,7 +157,7 @@ struct aac_common {
 
 /* buffer for text messages from the controller */
 char ac_printf[AAC_PRINTF_BUFSIZE];
-
+
 /* fib for synchronous commands */
 struct aac_fib ac_sync_fib;
 };
@@ -348,7 +348,7 @@ struct aac_softc
 
 /* command management */
 struct mutex aac_free_mtx;
-TAILQ_HEAD(,aac_command) aac_free; /* command structures
+TAILQ_HEAD(,aac_command) aac_free; /* command structures
 * available for reuse */
 TAILQ_HEAD(,aac_command) aac_ready; /* commands on hold for
 * controller resources */

@@ -989,7 +989,7 @@ encapped:
 } else
 rate = ni->ni_rates.rs_rates[ni->ni_txrate];
 rate &= IEEE80211_RATE_VAL;
-}
+}
 
 #if NBPFILTER > 0
 if (ic->ic_rawbpf != NULL)
@@ -2375,7 +2375,7 @@ acx_set_probe_resp_tmplt(struct acx_softc *sc, struct ieee80211_node *ni)
 
 m_copydata(m, 0, m->m_pkthdr.len, &resp.data);
 len = m->m_pkthdr.len + sizeof(resp.size);
-m_freem(m);
+m_freem(m);
 
 return (acx_set_tmplt(sc, ACXCMD_TMPLT_PROBE_RESP, &resp, len));
 }

@@ -36,10 +36,10 @@
 */
 /*
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
-*
+*
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
-*
+*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without

@@ -553,7 +553,7 @@ aic_poll(struct aic_softc *sc, struct scsi_xfer *xs, int count)
 }
 return 1;
 }
-
+
 /*
 * LOW LEVEL SCSI UTILITIES
 */
@@ -684,7 +684,7 @@ abort:
 aic_sched_msgout(sc, SEND_ABORT);
 return (1);
 }
-
+
 /*
 * Schedule a SCSI operation. This has now been pulled out of the interrupt
 * handler so that we may call it from aic_scsi_cmd and aic_done. This may
@@ -726,7 +726,7 @@ aic_sched(struct aic_softc *sc)
 bus_space_write_1(iot, ioh, SIMODE1, ENSCSIRST);
 bus_space_write_1(iot, ioh, SCSISEQ, ENRESELI);
 }
-
+
 void
 aic_sense(struct aic_softc *sc, struct aic_acb *acb)
 {
@@ -831,7 +831,7 @@ aic_dequeue(struct aic_softc *sc, struct aic_acb *acb)
 TAILQ_REMOVE(&sc->ready_list, acb, chain);
 }
 }
-
+
 /*
 * INTERRUPT/PROTOCOL ENGINE
 */
@@ -1305,7 +1305,7 @@ out:
 /* Disable REQ/ACK protocol. */
 bus_space_write_1(iot, ioh, SXFRCTL0, CHEN);
 }
-
+
 /* aic_dataout_pio: perform a data transfer using the FIFO datapath in the aic6360
 * Precondition: The SCSI bus should be in the DOUT phase, with REQ asserted
 * and ACK deasserted (i.e. waiting for a data byte).
@@ -1446,7 +1446,7 @@ phasechange:
 
 return out;
 }
-
+
 /* aic_datain_pio: perform data transfers using the FIFO datapath in the aic6360
 * Precondition: The SCSI bus should be in the DIN phase, with REQ asserted
 * and ACK deasserted (i.e. at least one byte is ready).
@@ -1569,7 +1569,7 @@ phasechange:
 
 return in;
 }
-
+
 /*
 * This is the workhorse routine of the driver.
 * Deficiencies (for now):
@@ -1992,7 +1992,7 @@ aic_timeout(void *arg)
 
 splx(s);
 }
-
+
 #ifdef AIC_DEBUG
 /*
 * The following functions are mostly used for debugging purposes, either

@@ -825,7 +825,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
 if (scb != NULL)
 ahc_set_transaction_status(scb,
 CAM_UNCOR_PARITY);
-ahc_reset_channel(ahc, devinfo.channel,
+ahc_reset_channel(ahc, devinfo.channel,
 /*init reset*/TRUE);
 }
 } else {

@@ -823,7 +823,7 @@ struct seeprom_config {
 #define CFBIOS_BUSSCAN 0x0008 /* Have the BIOS Scan the Bus */
 #define CFSM2DRV 0x0010 /* support more than two drives */
 #define CFSTPWLEVEL 0x0010 /* Termination level control */
-#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
+#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
 #define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
 #define CFTERM_MENU 0x0040 /* BIOS displays termination menu */
 #define CFEXTEND 0x0080 /* extended translation enabled */

@@ -80,9 +80,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
 static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
 struct amdgpu_bo_vm *vmbo;
 
+bo = shadow_bo->parent;
 vmbo = to_amdgpu_bo_vm(bo);
 /* in case amdgpu_device_recover_vram got NULL of bo->parent */
 if (!list_empty(&vmbo->shadow_list)) {
@@ -693,11 +694,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
 return r;
 
 *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
-INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
-/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
-* is initialized.
-*/
-bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
 return r;
 }
 
@@ -714,6 +710,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
 
 mutex_lock(&adev->shadow_list_lock);
 list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
+vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
+vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
 mutex_unlock(&adev->shadow_list_lock);
 }
 

@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 return r;
 }
 
-(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
 amdgpu_bo_add_to_shadow_list(*vmbo);
 
 return 0;

@@ -806,7 +806,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 {
 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 struct drm_buddy *mm = &mgr->mm;
-struct drm_buddy_block *block;
+struct amdgpu_vram_reservation *rsv;
 
 drm_printf(printer, " vis usage:%llu\n",
 amdgpu_vram_mgr_vis_usage(mgr));
@@ -818,8 +818,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 drm_buddy_print(mm, printer);
 
 drm_printf(printer, "reserved:\n");
-list_for_each_entry(block, &mgr->reserved_pages, link)
-drm_buddy_block_print(mm, block, printer);
+list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
+drm_printf(printer, "%#018llx-%#018llx: %llu\n",
+rsv->start, rsv->start + rsv->size, rsv->size);
 mutex_unlock(&mgr->lock);
 }
 

@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
 u32 reference_clock = adev->clock.spll.reference_freq;
 u32 tmp;
 
-if (adev->flags & AMD_IS_APU)
-return reference_clock;
+if (adev->flags & AMD_IS_APU) {
+switch (adev->asic_type) {
+case CHIP_STONEY:
+/* vbios says 48Mhz, but the actual freq is 100Mhz */
+return 10000;
+default:
+return reference_clock;
+}
+}
 
 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))

@@ -137,7 +137,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
 .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
 .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-.pct_ideal_sdp_bw_after_urgent = 100.0,
+.pct_ideal_sdp_bw_after_urgent = 90.0,
 .pct_ideal_fabric_bw_after_urgent = 67.0,
 .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
 .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented

@@ -2067,33 +2067,96 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
 return ret;
 }
 
+static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+uint32_t *gen_speed_override,
+uint32_t *lane_width_override)
+{
+struct amdgpu_device *adev = smu->adev;
+
+*gen_speed_override = 0xff;
+*lane_width_override = 0xff;
+
+switch (adev->pdev->device) {
+case 0x73A0:
+case 0x73A1:
+case 0x73A2:
+case 0x73A3:
+case 0x73AB:
+case 0x73AE:
+/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
+*lane_width_override = 6;
+break;
+case 0x73E0:
+case 0x73E1:
+case 0x73E3:
+*lane_width_override = 4;
+break;
+case 0x7420:
+case 0x7421:
+case 0x7422:
+case 0x7423:
+case 0x7424:
+*lane_width_override = 3;
+break;
+default:
+break;
+}
+}
+
+#ifndef MAX
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 uint32_t pcie_gen_cap,
 uint32_t pcie_width_cap)
 {
 struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-
-uint32_t smu_pcie_arg;
+struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+uint32_t gen_speed_override, lane_width_override;
 uint8_t *table_member1, *table_member2;
+uint32_t min_gen_speed, max_gen_speed;
+uint32_t min_lane_width, max_lane_width;
+uint32_t smu_pcie_arg;
 int ret, i;
 
 GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
 GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
 
-/* lclk dpm table setup */
-for (i = 0; i < MAX_PCIE_CONF; i++) {
-dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
-dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
+sienna_cichlid_get_override_pcie_settings(smu,
+&gen_speed_override,
+&lane_width_override);
+
+/* PCIE gen speed override */
+if (gen_speed_override != 0xff) {
+min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+} else {
+min_gen_speed = MAX(0, table_member1[0]);
+max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+min_gen_speed = min_gen_speed > max_gen_speed ?
+max_gen_speed : min_gen_speed;
 }
+pcie_table->pcie_gen[0] = min_gen_speed;
+pcie_table->pcie_gen[1] = max_gen_speed;
+
+/* PCIE lane width override */
+if (lane_width_override != 0xff) {
+min_lane_width = MIN(pcie_width_cap, lane_width_override);
+max_lane_width = MIN(pcie_width_cap, lane_width_override);
+} else {
+min_lane_width = MAX(1, table_member2[0]);
+max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+min_lane_width = min_lane_width > max_lane_width ?
+max_lane_width : min_lane_width;
+}
+pcie_table->pcie_lane[0] = min_lane_width;
+pcie_table->pcie_lane[1] = max_lane_width;
 
 for (i = 0; i < NUM_LINK_LEVELS; i++) {
-smu_pcie_arg = (i << 16) |
-((table_member1[i] <= pcie_gen_cap) ?
-(table_member1[i] << 8) :
-(pcie_gen_cap << 8)) |
-((table_member2[i] <= pcie_width_cap) ?
-table_member2[i] :
-pcie_width_cap);
+smu_pcie_arg = (i << 16 |
+pcie_table->pcie_gen[i] << 8 |
+pcie_table->pcie_lane[i]);
 
 ret = smu_cmn_send_smc_msg_with_param(smu,
 SMU_MSG_OverridePcieParameters,
@@ -2101,11 +2164,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 NULL);
 if (ret)
 return ret;
-
-if (table_member1[i] > pcie_gen_cap)
-dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
-if (table_member2[i] > pcie_width_cap)
-dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
 }
 
 return 0;

@@ -582,11 +582,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
 if (smu_power->power_context || smu_power->power_context_size != 0)
 return -EINVAL;
 
-smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
+smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
 GFP_KERNEL);
 if (!smu_power->power_context)
 return -ENOMEM;
-smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
+smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
 
 return 0;
 }

@@ -119,6 +119,32 @@ static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 return index ? 0 : 1;
 }
 
+static int intel_dp_aux_sync_len(void)
+{
+int precharge = 16; /* 10-16 */
+int preamble = 16;
+
+return precharge + preamble;
+}
+
+static int intel_dp_aux_fw_sync_len(void)
+{
+int precharge = 10; /* 10-16 */
+int preamble = 8;
+
+return precharge + preamble;
+}
+
+static int g4x_dp_aux_precharge_len(void)
+{
+int precharge_min = 10;
+int preamble = 16;
+
+/* HW wants the length of the extra precharge in 2us units */
+return (intel_dp_aux_sync_len() -
+precharge_min - preamble) / 2;
+}
+
 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 int send_bytes,
 u32 aux_clock_divider)
@@ -141,7 +167,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 timeout |
 DP_AUX_CH_CTL_RECEIVE_ERROR |
 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-(3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+(g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
 }
 
@@ -165,8 +191,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 DP_AUX_CH_CTL_TIME_OUT_MAX |
 DP_AUX_CH_CTL_RECEIVE_ERROR |
 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
-DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
+DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
 
 if (intel_tc_port_in_tbt_alt_mode(dig_port))
 ret |= DP_AUX_CH_CTL_TBT_IO;

@@ -179,97 +179,108 @@ out_file:
 }
 
 struct parallel_switch {
-struct task_struct *tsk;
+struct kthread_worker *worker;
+struct kthread_work work;
 struct intel_context *ce[2];
+int result;
 };
 
-static int __live_parallel_switch1(void *data)
+static void __live_parallel_switch1(struct kthread_work *work)
 {
-struct parallel_switch *arg = data;
+struct parallel_switch *arg =
+container_of(work, typeof(*arg), work);
 IGT_TIMEOUT(end_time);
 unsigned long count;
 
 count = 0;
+arg->result = 0;
 do {
 struct i915_request *rq = NULL;
-int err, n;
+int n;
 
-err = 0;
-for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
+for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
 struct i915_request *prev = rq;
 
 rq = i915_request_create(arg->ce[n]);
 if (IS_ERR(rq)) {
 i915_request_put(prev);
-return PTR_ERR(rq);
+arg->result = PTR_ERR(rq);
+break;
 }
 
 i915_request_get(rq);
 if (prev) {
-err = i915_request_await_dma_fence(rq, &prev->fence);
+arg->result =
+i915_request_await_dma_fence(rq,
+&prev->fence);
 i915_request_put(prev);
 }
 
 i915_request_add(rq);
 }
 
+if (IS_ERR_OR_NULL(rq))
+break;
+
 if (i915_request_wait(rq, 0, HZ) < 0)
-err = -ETIME;
+arg->result = -ETIME;
+
 i915_request_put(rq);
-if (err)
-return err;
 
 count++;
-} while (!__igt_timeout(end_time, NULL));
+} while (!arg->result && !__igt_timeout(end_time, NULL));
 
-pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
-return 0;
+pr_info("%s: %lu switches (sync) <%d>\n",
+arg->ce[0]->engine->name, count, arg->result);
 }
 
-static int __live_parallel_switchN(void *data)
+static void __live_parallel_switchN(struct kthread_work *work)
 {
-struct parallel_switch *arg = data;
+struct parallel_switch *arg =
+container_of(work, typeof(*arg), work);
 struct i915_request *rq = NULL;
 IGT_TIMEOUT(end_time);
 unsigned long count;
 int n;
 
 count = 0;
+arg->result = 0;
 do {
-for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
 struct i915_request *prev = rq;
-int err = 0;
 
 rq = i915_request_create(arg->ce[n]);
 if (IS_ERR(rq)) {
 i915_request_put(prev);
-return PTR_ERR(rq);
+arg->result = PTR_ERR(rq);
+break;
 }
 
 i915_request_get(rq);
 if (prev) {
-err = i915_request_await_dma_fence(rq, &prev->fence);
+arg->result =
+i915_request_await_dma_fence(rq,
+&prev->fence);
 i915_request_put(prev);
 }
 
 i915_request_add(rq);
-if (err) {
-i915_request_put(rq);
-return err;
-}
 }
 
 count++;
-} while (!__igt_timeout(end_time, NULL));
-i915_request_put(rq);
+} while (!arg->result && !__igt_timeout(end_time, NULL));
 
-pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
-return 0;
+if (!IS_ERR_OR_NULL(rq))
+i915_request_put(rq);
+
+pr_info("%s: %lu switches (many) <%d>\n",
+arg->ce[0]->engine->name, count, arg->result);
 }
 
 static int live_parallel_switch(void *arg)
 {
 struct drm_i915_private *i915 = arg;
-static int (* const func[])(void *arg) = {
+static void (* const func[])(struct kthread_work *) = {
 __live_parallel_switch1,
 __live_parallel_switchN,
 NULL,
@@ -277,7 +288,7 @@ static int live_parallel_switch(void *arg)
 struct parallel_switch *data = NULL;
 struct i915_gem_engines *engines;
 struct i915_gem_engines_iter it;
-int (* const *fn)(void *arg);
+void (* const *fn)(struct kthread_work *);
 struct i915_gem_context *ctx;
 struct intel_context *ce;
 struct file *file;
@@ -335,8 +346,10 @@ static int live_parallel_switch(void *arg)
 continue;
 
 ce = intel_context_create(data[m].ce[0]->engine);
-if (IS_ERR(ce))
+if (IS_ERR(ce)) {
+err = PTR_ERR(ce);
 goto out;
+}
 
 err = intel_context_pin(ce);
 if (err) {
@@ -348,9 +361,24 @@ static int live_parallel_switch(void *arg)
 }
 }
 
+for (n = 0; n < count; n++) {
+struct kthread_worker *worker;
+
+if (!data[n].ce[0])
+continue;
+
+worker = kthread_create_worker(0, "igt/parallel:%s",
+data[n].ce[0]->engine->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
+goto out;
+}
+
+data[n].worker = worker;
+}
+
 for (fn = func; !err && *fn; fn++) {
 struct igt_live_test t;
 int n;
 
 err = igt_live_test_begin(&t, i915, __func__, "");
 if (err)
@@ -360,34 +388,23 @@ static int live_parallel_switch(void *arg)
 if (!data[n].ce[0])
 continue;
 
-data[n].tsk = kthread_run(*fn, &data[n],
-"igt/parallel:%s",
-data[n].ce[0]->engine->name);
-if (IS_ERR(data[n].tsk)) {
-err = PTR_ERR(data[n].tsk);
-break;
-}
-get_task_struct(data[n].tsk);
+data[n].result = 0;
+kthread_init_work(&data[n].work, *fn);
+kthread_queue_work(data[n].worker, &data[n].work);
 }
 
-yield(); /* start all threads before we kthread_stop() */
-
 for (n = 0; n < count; n++) {
-int status;
-
-if (IS_ERR_OR_NULL(data[n].tsk))
-continue;
-
-status = kthread_stop(data[n].tsk);
-if (status && !err)
-err = status;
-
-put_task_struct(data[n].tsk);
-data[n].tsk = NULL;
+if (data[n].ce[0]) {
+kthread_flush_work(&data[n].work);
+if (data[n].result && !err)
+err = data[n].result;
+}
 }
 
-if (igt_live_test_end(&t))
-err = -EIO;
+if (igt_live_test_end(&t)) {
+err = err ?: -EIO;
+break;
+}
 }
 
 out:
@@ -399,6 +416,9 @@ out:
 intel_context_unpin(data[n].ce[m]);
 intel_context_put(data[n].ce[m]);
 }
+
+if (data[n].worker)
+kthread_destroy_worker(data[n].worker);
 }
 kfree(data);
 out_file:

@@ -1532,8 +1532,8 @@ static int live_busywait_preempt(void *arg)
 struct drm_i915_gem_object *obj;
 struct i915_vma *vma;
 enum intel_engine_id id;
-int err = -ENOMEM;
 u32 *map;
+int err;
 
 /*
 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@@ -1541,13 +1541,17 @@ static int live_busywait_preempt(void *arg)
 */
 
 ctx_hi = kernel_context(gt->i915, NULL);
-if (!ctx_hi)
-return -ENOMEM;
+if (IS_ERR(ctx_hi))
+return PTR_ERR(ctx_hi);
 
 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 ctx_lo = kernel_context(gt->i915, NULL);
-if (!ctx_lo)
+if (IS_ERR(ctx_lo)) {
+err = PTR_ERR(ctx_lo);
 goto err_ctx_hi;
+}
 
 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@@ -3475,12 +3479,14 @@ static int random_priority(struct rnd_state *rnd)
 
 struct preempt_smoke {
 struct intel_gt *gt;
+struct kthread_work work;
 struct i915_gem_context **contexts;
 struct intel_engine_cs *engine;
 struct drm_i915_gem_object *batch;
 unsigned int ncontext;
 struct rnd_state prng;
 unsigned long count;
+int result;
 };
 
 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
@@ -3540,34 +3546,31 @@ unpin:
 return err;
 }
 
-static int smoke_crescendo_thread(void *arg)
+static void smoke_crescendo_work(struct kthread_work *work)
 {
-struct preempt_smoke *smoke = arg;
+struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
 IGT_TIMEOUT(end_time);
 unsigned long count;
 
 count = 0;
 do {
 struct i915_gem_context *ctx = smoke_context(smoke);
-int err;
 
-err = smoke_submit(smoke,
-ctx, count % I915_PRIORITY_MAX,
-smoke->batch);
-if (err)
-return err;
+smoke->result = smoke_submit(smoke, ctx,
+count % I915_PRIORITY_MAX,
+smoke->batch);
 
 count++;
-} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+} while (!smoke->result && count < smoke->ncontext &&
+!__igt_timeout(end_time, NULL));
 
 smoke->count = count;
-return 0;
 }
 
 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 #define BATCH BIT(0)
 {
-struct task_struct *tsk[I915_NUM_ENGINES] = {};
+struct kthread_worker *worker[I915_NUM_ENGINES] = {};
 struct preempt_smoke *arg;
 struct intel_engine_cs *engine;
 enum intel_engine_id id;
@@ -3578,6 +3581,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 if (!arg)
 return -ENOMEM;
 
+memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
+
 for_each_engine(engine, smoke->gt, id) {
 arg[id] = *smoke;
 arg[id].engine = engine;
@@ -3585,31 +3590,28 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 arg[id].batch = NULL;
 arg[id].count = 0;
 
-tsk[id] = kthread_run(smoke_crescendo_thread, arg,
-"igt/smoke:%d", id);
-if (IS_ERR(tsk[id])) {
-err = PTR_ERR(tsk[id]);
+worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+if (IS_ERR(worker[id])) {
+err = PTR_ERR(worker[id]);
 break;
 }
-get_task_struct(tsk[id]);
-}
 
-yield(); /* start all threads before we kthread_stop() */
+kthread_init_work(&arg[id].work, smoke_crescendo_work);
+kthread_queue_work(worker[id], &arg[id].work);
+}
 
 count = 0;
 for_each_engine(engine, smoke->gt, id) {
-int status;
-
-if (IS_ERR_OR_NULL(tsk[id]))
+if (IS_ERR_OR_NULL(worker[id]))
 continue;
 
-status = kthread_stop(tsk[id]);
-if (status && !err)
-err = status;
+kthread_flush_work(&arg[id].work);
+if (arg[id].result && !err)
+err = arg[id].result;
 
 count += arg[id].count;
 
-put_task_struct(tsk[id]);
+kthread_destroy_worker(worker[id]);
 }
 
 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",

@@ -866,10 +866,13 @@ static int igt_reset_active_engine(void *arg)
 }
 
 struct active_engine {
-struct task_struct *task;
+struct kthread_worker *worker;
+struct kthread_work work;
 struct intel_engine_cs *engine;
 unsigned long resets;
 unsigned int flags;
+bool stop;
+int result;
 };
 
 #define TEST_ACTIVE BIT(0)
@@ -900,10 +903,10 @@ static int active_request_put(struct i915_request *rq)
 return err;
 }
 
-static int active_engine(void *data)
+static void active_engine(struct kthread_work *work)
 {
 I915_RND_STATE(prng);
-struct active_engine *arg = data;
+struct active_engine *arg = container_of(work, typeof(*arg), work);
 struct intel_engine_cs *engine = arg->engine;
 struct i915_request *rq[8] = {};
 struct intel_context *ce[ARRAY_SIZE(rq)];
@@ -913,16 +916,17 @@ static int active_engine(void *data)
 for (count = 0; count < ARRAY_SIZE(ce); count++) {
 ce[count] = intel_context_create(engine);
 if (IS_ERR(ce[count])) {
-err = PTR_ERR(ce[count]);
-pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
+arg->result = PTR_ERR(ce[count]);
+pr_err("[%s] Create context #%ld failed: %d!\n",
+engine->name, count, arg->result);
 while (--count)
 intel_context_put(ce[count]);
-return err;
+return;
 }
 }
 
 count = 0;
-while (!kthread_should_stop()) {
+while (!READ_ONCE(arg->stop)) {
 unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
 struct i915_request *old = rq[idx];
 struct i915_request *new;
@@ -967,7 +971,7 @@ static int active_engine(void *data)
 intel_context_put(ce[count]);
 }
 
-return err;
+arg->result = err;
 }
 
 static int __igt_reset_engines(struct intel_gt *gt,
@@ -1022,7 +1026,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 
 memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
 for_each_engine(other, gt, tmp) {
-struct task_struct *tsk;
+struct kthread_worker *worker;
 
 threads[tmp].resets =
 i915_reset_engine_count(global, other);
@@ -1036,19 +1040,21 @@ static int __igt_reset_engines(struct intel_gt *gt,
 threads[tmp].engine = other;
 threads[tmp].flags = flags;
 
-tsk = kthread_run(active_engine, &threads[tmp],
-"igt/%s", other->name);
-if (IS_ERR(tsk)) {
-err = PTR_ERR(tsk);
-pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
+worker = kthread_create_worker(0, "igt/%s",
+other->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
+pr_err("[%s] Worker create failed: %d!\n",
+engine->name, err);
 goto unwind;
 }
 
-threads[tmp].task = tsk;
-get_task_struct(tsk);
-}
+threads[tmp].worker = worker;
 
-yield(); /* start all threads before we begin */
+kthread_init_work(&threads[tmp].work, active_engine);
+kthread_queue_work(threads[tmp].worker,
+&threads[tmp].work);
+}
 
 st_engine_heartbeat_disable_no_pm(engine);
 GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
@@ -1197,17 +1203,20 @@ unwind:
 for_each_engine(other, gt, tmp) {
 int ret;
 
-if (!threads[tmp].task)
+if (!threads[tmp].worker)
 continue;
 
-ret = kthread_stop(threads[tmp].task);
+WRITE_ONCE(threads[tmp].stop, true);
+kthread_flush_work(&threads[tmp].work);
+ret = READ_ONCE(threads[tmp].result);
 if (ret) {
 pr_err("kthread for other engine %s failed, err=%d\n",
 other->name, ret);
 if (!err)
 err = ret;
 }
-put_task_struct(threads[tmp].task);
+
+kthread_destroy_worker(threads[tmp].worker);
 
 /* GuC based resets are not logged per engine */
 if (!using_guc) {

@@ -299,9 +299,18 @@ __live_request_alloc(struct intel_context *ce)
 return intel_context_create_request(ce);
 }
 
-static int __igt_breadcrumbs_smoketest(void *arg)
+struct smoke_thread {
+struct kthread_worker *worker;
+struct kthread_work work;
+struct smoketest *t;
+bool stop;
+int result;
+};
+
+static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
 {
-struct smoketest *t = arg;
+struct smoke_thread *thread = container_of(work, typeof(*thread), work);
+struct smoketest *t = thread->t;
 const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
 const unsigned int total = 4 * t->ncontexts + 1;
 unsigned int num_waits = 0, num_fences = 0;
@@ -320,8 +329,10 @@ static int __igt_breadcrumbs_smoketest(void *arg)
 */
 
 requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
-if (!requests)
-return -ENOMEM;
+if (!requests) {
+thread->result = -ENOMEM;
+return;
+}
 
 order = i915_random_order(total, &prng);
 if (!order) {
@@ -329,7 +340,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
 goto out_requests;
 }
 
-while (!kthread_should_stop()) {
+while (!READ_ONCE(thread->stop)) {
 struct i915_sw_fence *submit, *wait;
 unsigned int n, count;
 
@@ -437,7 +448,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
 kfree(order);
 out_requests:
 kfree(requests);
-return err;
+thread->result = err;
 }
 
 static int mock_breadcrumbs_smoketest(void *arg)
@@ -450,7 +461,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
 .request_alloc = __mock_request_alloc
 };
 unsigned int ncpus = num_online_cpus();
-struct task_struct **threads;
+struct smoke_thread *threads;
 unsigned int n;
 int ret = 0;
 
@@ -479,28 +490,37 @@ static int mock_breadcrumbs_smoketest(void *arg)
 }
 
 for (n = 0; n < ncpus; n++) {
-threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
-&t, "igt/%d", n);
-if (IS_ERR(threads[n])) {
-ret = PTR_ERR(threads[n]);
+struct kthread_worker *worker;
+
+worker = kthread_create_worker(0, "igt/%d", n);
+if (IS_ERR(worker)) {
+ret = PTR_ERR(worker);
 ncpus = n;
 break;
 }
 
-get_task_struct(threads[n]);
+threads[n].worker = worker;
+threads[n].t = &t;
+threads[n].stop = false;
+threads[n].result = 0;
+
+kthread_init_work(&threads[n].work,
+__igt_breadcrumbs_smoketest);
+kthread_queue_work(worker, &threads[n].work);
 }
 
 yield(); /* start all threads before we begin */
 drm_msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
 
 for (n = 0; n < ncpus; n++) {
 int err;
 
-err = kthread_stop(threads[n]);
+WRITE_ONCE(threads[n].stop, true);
+kthread_flush_work(&threads[n].work);
+err = READ_ONCE(threads[n].result);
 if (err < 0 && !ret)
 ret = err;
 
-put_task_struct(threads[n]);
+kthread_destroy_worker(threads[n].worker);
 }
 pr_info("Completed %lu waits for %lu fence across %d cpus\n",
 atomic_long_read(&t.num_waits),
@@ -1419,9 +1439,18 @@ out_free:
 return err;
 }
 
-static int __live_parallel_engine1(void *arg)
+struct parallel_thread {
+struct kthread_worker *worker;
+struct kthread_work work;
+struct intel_engine_cs *engine;
+int result;
+};
+
+static void __live_parallel_engine1(struct kthread_work *work)
 {
-struct intel_engine_cs *engine = arg;
+struct parallel_thread *thread =
+container_of(work, typeof(*thread), work);
+struct intel_engine_cs *engine = thread->engine;
 IGT_TIMEOUT(end_time);
 unsigned long count;
 int err = 0;
@@ -1452,12 +1481,14 @@ static int __live_parallel_engine1(void *arg)
 intel_engine_pm_put(engine);
 
 pr_info("%s: %lu request + sync\n", engine->name, count);
-return err;
+thread->result = err;
 }
 
-static int __live_parallel_engineN(void *arg)
+static void __live_parallel_engineN(struct kthread_work *work)
 {
-struct intel_engine_cs *engine = arg;
+struct parallel_thread *thread =
+container_of(work, typeof(*thread), work);
+struct intel_engine_cs *engine = thread->engine;
 IGT_TIMEOUT(end_time);
 unsigned long count;
 int err = 0;
@@ -1479,7 +1510,7 @@ static int __live_parallel_engineN(void *arg)
 intel_engine_pm_put(engine);
 
 pr_info("%s: %lu requests\n", engine->name, count);
-return err;
+thread->result = err;
 }
 
 static bool wake_all(struct drm_i915_private *i915)
@@ -1505,9 +1536,11 @@ static int wait_for_all(struct drm_i915_private *i915)
 return -ETIME;
 }
 
-static int __live_parallel_spin(void *arg)
+static void __live_parallel_spin(struct kthread_work *work)
 {
-struct intel_engine_cs *engine = arg;
+struct parallel_thread *thread =
+container_of(work, typeof(*thread), work);
+struct intel_engine_cs *engine = thread->engine;
 struct igt_spinner spin;
 struct i915_request *rq;
 int err = 0;
@@ -1520,7 +1553,8 @@ static int __live_parallel_spin(void *arg)
 
 if (igt_spinner_init(&spin, engine->gt)) {
 wake_all(engine->i915);
-return -ENOMEM;
+thread->result = -ENOMEM;
+return;
 }
 
 intel_engine_pm_get(engine);
@@ -1553,22 +1587,22 @@ static int __live_parallel_spin(void *arg)
 
 out_spin:
 igt_spinner_fini(&spin);
-return err;
+thread->result = err;
 }
 
 static int live_parallel_engines(void *arg)
 {
 struct drm_i915_private *i915 = arg;
-static int (* const func[])(void *arg) = {
+static void (* const func[])(struct kthread_work *) = {
 __live_parallel_engine1,
 __live_parallel_engineN,
 __live_parallel_spin,
 NULL,
 };
 const unsigned int nengines = num_uabi_engines(i915);
+struct parallel_thread *threads;
 struct intel_engine_cs *engine;
-int (* const *fn)(void *arg);
-struct task_struct **tsk;
+void (* const *fn)(struct kthread_work *);
 int err = 0;
 
 /*
@@ -1576,8 +1610,8 @@ static int live_parallel_engines(void *arg)
 * tests that we load up the system maximally.
 */
 
-tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
-if (!tsk)
+threads = kcalloc(nengines, sizeof(*threads), GFP_KERNEL);
+if (!threads)
 return -ENOMEM;
 
 for (fn = func; !err && *fn; fn++) {
@@ -1594,37 +1628,44 @@ static int live_parallel_engines(void *arg)
 
 idx = 0;
 for_each_uabi_engine(engine, i915) {
-tsk[idx] = kthread_run(*fn, engine,
-"igt/parallel:%s",
-engine->name);
-if (IS_ERR(tsk[idx])) {
-err = PTR_ERR(tsk[idx]);
+struct kthread_worker *worker;
+
+worker = kthread_create_worker(0, "igt/parallel:%s",
+engine->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
 break;
 }
-get_task_struct(tsk[idx++]);
-}
 
-yield(); /* start all threads before we kthread_stop() */
+threads[idx].worker = worker;
+threads[idx].result = 0;
+threads[idx].engine = engine;
+
+kthread_init_work(&threads[idx].work, *fn);
+kthread_queue_work(worker, &threads[idx].work);
+idx++;
+}
 
 idx = 0;
 for_each_uabi_engine(engine, i915) {
 int status;
 
-if (IS_ERR(tsk[idx]))
+if (!threads[idx].worker)
 break;
 
-status = kthread_stop(tsk[idx]);
+kthread_flush_work(&threads[idx].work);
+status = READ_ONCE(threads[idx].result);
 if (status && !err)
 err = status;
 
-put_task_struct(tsk[idx++]);
+kthread_destroy_worker(threads[idx++].worker);
 }
 
 if (igt_live_test_end(&t))
 err = -EIO;
 }
 
-kfree(tsk);
+kfree(threads);
 return err;
 }
 
@@ -1672,7 +1713,7 @@ static int live_breadcrumbs_smoketest(void *arg)
 const unsigned int ncpus = num_online_cpus();
 unsigned long num_waits, num_fences;
 struct intel_engine_cs *engine;
-struct task_struct **threads;
+struct smoke_thread *threads;
 struct igt_live_test live;
 intel_wakeref_t wakeref;
 struct smoketest *smoke;
@@ -1746,23 +1787,26 @@ static int live_breadcrumbs_smoketest(void *arg)
 smoke[idx].max_batch, engine->name);
 
 for (n = 0; n < ncpus; n++) {
-struct task_struct *tsk;
+unsigned int i = idx * ncpus + n;
+struct kthread_worker *worker;
 
-tsk = kthread_run(__igt_breadcrumbs_smoketest,
-&smoke[idx], "igt/%d.%d", idx, n);
-if (IS_ERR(tsk)) {
-ret = PTR_ERR(tsk);
+worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+if (IS_ERR(worker)) {
+ret = PTR_ERR(worker);
 goto out_flush;
 }
 
-get_task_struct(tsk);
-threads[idx * ncpus + n] = tsk;
+threads[i].worker = worker;
+threads[i].t = &smoke[idx];
+
+kthread_init_work(&threads[i].work,
+__igt_breadcrumbs_smoketest);
+kthread_queue_work(worker, &threads[i].work);
 }
 
 idx++;
 }
 
 yield(); /* start all threads before we begin */
 drm_msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
 
 out_flush:
@@ -1771,17 +1815,19 @@ out_flush:
 num_fences = 0;
 for_each_uabi_engine(engine, i915) {
 for (n = 0; n < ncpus; n++) {
-struct task_struct *tsk = threads[idx * ncpus + n];
+unsigned int i = idx * ncpus + n;
 int err;
 
-if (!tsk)
+if (!threads[i].worker)
 continue;
 
-err = kthread_stop(tsk);
+WRITE_ONCE(threads[i].stop, true);
+kthread_flush_work(&threads[i].work);
+err = READ_ONCE(threads[i].result);
 if (err < 0 && !ret)
 ret = err;
 
-put_task_struct(tsk);
+kthread_destroy_worker(threads[i].worker);
 }
 
 num_waits += atomic_long_read(&smoke[idx].num_waits);
@@ -2891,9 +2937,18 @@ out:
 return err;
 }
 
-static int p_sync0(void *arg)
+struct p_thread {
+struct perf_stats p;
+struct kthread_worker *worker;
+struct kthread_work work;
+struct intel_engine_cs *engine;
+int result;
+};
+
+static void p_sync0(struct kthread_work *work)
 {
-struct perf_stats *p = arg;
+struct p_thread *thread = container_of(work, typeof(*thread), work);
+struct perf_stats *p = &thread->p;
 struct intel_engine_cs *engine = p->engine;
 struct intel_context *ce;
 IGT_TIMEOUT(end_time);
@@ -2902,13 +2957,16 @@ static int p_sync0(void *arg)
 int err = 0;
 
 ce = intel_context_create(engine);
-if (IS_ERR(ce))
-return PTR_ERR(ce);
+if (IS_ERR(ce)) {
+thread->result = PTR_ERR(ce);
+return;
+}
 
 err = intel_context_pin(ce);
 if (err) {
 intel_context_put(ce);
-return err;
+thread->result = err;
+return;
 }
 
 if (intel_engine_supports_stats(engine)) {
@@ -2958,12 +3016,13 @@ static int p_sync0(void *arg)
 
 intel_context_unpin(ce);
 intel_context_put(ce);
-return err;
+thread->result = err;
 }
 
-static int p_sync1(void *arg)
+static void p_sync1(struct kthread_work *work)
 {
-struct perf_stats *p = arg;
+struct p_thread *thread = container_of(work, typeof(*thread), work);
+struct perf_stats *p = &thread->p;
 struct intel_engine_cs *engine = p->engine;
 struct i915_request *prev = NULL;
 struct intel_context *ce;
@@ -2973,13 +3032,16 @@ static int p_sync1(void *arg)
 int err = 0;
 
 ce = intel_context_create(engine);
-if (IS_ERR(ce))
-return PTR_ERR(ce);
+if (IS_ERR(ce)) {
+thread->result = PTR_ERR(ce);
+return;
+}
 
 err = intel_context_pin(ce);
 if (err) {
 intel_context_put(ce);
-return err;
+thread->result = err;
+return;
 }
 
 if (intel_engine_supports_stats(engine)) {
@@ -3031,12 +3093,13 @@ static int p_sync1(void *arg)
 
 intel_context_unpin(ce);
 intel_context_put(ce);
-return err;
+thread->result = err;
 }
 
-static int p_many(void *arg)
+static void p_many(struct kthread_work *work)
 {
-struct perf_stats *p = arg;
+struct p_thread *thread = container_of(work, typeof(*thread), work);
+struct perf_stats *p = &thread->p;
 struct intel_engine_cs *engine = p->engine;
 struct intel_context *ce;
 IGT_TIMEOUT(end_time);
@@ -3045,13 +3108,16 @@ static int p_many(void *arg)
 bool busy;
 
 ce = intel_context_create(engine);
-if (IS_ERR(ce))
-return PTR_ERR(ce);
+if (IS_ERR(ce)) {
+thread->result = PTR_ERR(ce);
+return;
+}
 
 err = intel_context_pin(ce);
 if (err) {
 intel_context_put(ce);
-return err;
+thread->result = err;
+return;
 }
 
 if (intel_engine_supports_stats(engine)) {
@@ -3092,26 +3158,23 @@ static int p_many(void *arg)
 
 intel_context_unpin(ce);
 intel_context_put(ce);
-return err;
+thread->result = err;
 }
 
 static int perf_parallel_engines(void *arg)
 {
 struct drm_i915_private *i915 = arg;
-static int (* const func[])(void *arg) = {
+static void (* const func[])(struct kthread_work *) = {
 p_sync0,
 p_sync1,
 p_many,
 NULL,
 };
 const unsigned int nengines = num_uabi_engines(i915);
+void (* const *fn)(struct kthread_work *);
 struct intel_engine_cs *engine;
-int (* const *fn)(void *arg);
 struct pm_qos_request qos;
-struct {
-struct perf_stats p;
-struct task_struct *tsk;
-} *engines;
+struct p_thread *engines;
 int err = 0;
 
 engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
@@ -3134,36 +3197,45 @@ static int perf_parallel_engines(void *arg)
 
 idx = 0;
 for_each_uabi_engine(engine, i915) {
+struct kthread_worker *worker;
+
 intel_engine_pm_get(engine);
 
 memset(&engines[idx].p, 0, sizeof(engines[idx].p));
-engines[idx].p.engine = engine;
 
-engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
-"igt:%s", engine->name);
-if (IS_ERR(engines[idx].tsk)) {
-err = PTR_ERR(engines[idx].tsk);
+worker = kthread_create_worker(0, "igt:%s",
+engine->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
 intel_engine_pm_put(engine);
 break;
 }
-get_task_struct(engines[idx++].tsk);
-}
+engines[idx].worker = worker;
+engines[idx].result = 0;
+engines[idx].p.engine = engine;
+engines[idx].engine = engine;
 
-yield(); /* start all threads before we kthread_stop() */
+kthread_init_work(&engines[idx].work, *fn);
+kthread_queue_work(worker, &engines[idx].work);
+idx++;
+}
 
 idx = 0;
 for_each_uabi_engine(engine, i915) {
 int status;
 
-if (IS_ERR(engines[idx].tsk))
+if (!engines[idx].worker)
 break;
 
-status = kthread_stop(engines[idx].tsk);
+kthread_flush_work(&engines[idx].work);
+status = READ_ONCE(engines[idx].result);
 if (status && !err)
 err = status;
 
 intel_engine_pm_put(engine);
-put_task_struct(engines[idx++].tsk);
+
+kthread_destroy_worker(engines[idx].worker);
+idx++;
 }
 
 if (igt_live_test_end(&t))

@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.74 2023/02/04 19:33:03 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.75 2023/06/20 16:30:30 cheloha Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 
 /*-
@@ -234,7 +234,6 @@ schedcpu(void *arg)
 }
 SCHED_UNLOCK(s);
 }
-uvm_meter();
 wakeup(&lbolt);
 timeout_add_sec(to, 1);
 }
@@ -669,6 +668,7 @@ scheduler_start(void)
 
 rrticks_init = hz / 10;
 schedcpu(&schedcpu_to);
+uvm_meter(NULL);
 
 #ifndef SMALL_KERNEL
 if (perfpolicy == PERFPOL_AUTO)

@@ -36,7 +36,7 @@
 
 ENTRY(bcopy)
 /* switch the source and destination registers */
-eor r0, r1, r0
-eor r1, r0, r1
-eor r0, r1, r0
+eor r0, r1, r0
+eor r1, r0, r1
+eor r0, r1, r0
 b PIC_SYM(memmove, PLT)

@@ -17,7 +17,7 @@
 
 #include <machine/asm.h>
 
-/*
+/*
 * stack is aligned as there's a possibility of branching to L_overflow
 * which makes a C call
 */
@@ -51,9 +51,9 @@ L_overflow:
 
 ENTRY(__udivsi3)
 L_udivide: /* r0 = r0 / r1; r1 = r0 % r1 */
-eor r0, r1, r0
-eor r1, r0, r1
-eor r0, r1, r0
+eor r0, r1, r0
+eor r1, r0, r1
+eor r0, r1, r0
 /* r0 = r1 / r0; r1 = r1 % r0 */
 cmp r0, #1
 bcc L_overflow
@@ -73,9 +73,9 @@ L_divide_l0: /* r0 == 1 */
 
 ENTRY(__divsi3)
 L_divide: /* r0 = r0 / r1; r1 = r0 % r1 */
-eor r0, r1, r0
-eor r1, r0, r1
-eor r0, r1, r0
+eor r0, r1, r0
+eor r1, r0, r1
+eor r0, r1, r0
 /* r0 = r1 / r0; r1 = r1 % r0 */
 cmp r0, #1
 bcc L_overflow
@@ -94,7 +94,7 @@ L_divide_l1:
 
 /*
 * If the highest bit of the dividend is set, we have to be
-* careful when shifting the divisor. Test this.
+* careful when shifting the divisor. Test this.
 */
 movs r1,r1
 bpl L_old_code

@@ -39,4 +39,4 @@ _PROF_PROLOGUE
 and r1, r0, #0xff
 mov r0, r0, lsr #8
 orr r0, r0, r1, lsl #8
-mov pc, lr
+mov pc, lr

@ -77,7 +77,7 @@ ENTRY_NP(memmove)
|
|||
|
||||
bcc Lmemcpy_backwards
|
||||
|
||||
/* start of forwards copy */
|
||||
/* start of forwards copy */
|
||||
subs r2, r2, #4
|
||||
blt Lmemcpy_fl4 /* less than 4 bytes */
|
||||
ands r12, r0, #3
|
||||
|
@ -89,28 +89,28 @@ Lmemcpy_ft8:
|
|||
/* We have aligned source and destination */
|
||||
subs r2, r2, #8
|
||||
blt Lmemcpy_fl12 /* less than 12 bytes (4 from above) */
|
||||
subs r2, r2, #0x14
|
||||
subs r2, r2, #0x14
|
||||
blt Lmemcpy_fl32 /* less than 32 bytes (12 from above) */
|
||||
stmdb sp!, {r4} /* borrow r4 */
|
||||
|
||||
/* blat 32 bytes at a time */
|
||||
/* XXX for really big copies perhaps we should use more registers */
|
||||
Lmemcpy_floop32:
|
||||
Lmemcpy_floop32:
|
||||
ldmia r1!, {r3, r4, r12, lr}
|
||||
stmia r0!, {r3, r4, r12, lr}
|
||||
ldmia r1!, {r3, r4, r12, lr}
|
||||
stmia r0!, {r3, r4, r12, lr}
|
||||
subs r2, r2, #0x20
|
||||
subs r2, r2, #0x20
|
||||
bge Lmemcpy_floop32
|
||||
|
||||
cmn r2, #0x10
|
||||
ldmiage r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
|
||||
stmiage r0!, {r3, r4, r12, lr}
|
||||
subge r2, r2, #0x10
|
||||
subge r2, r2, #0x10
|
||||
ldmia sp!, {r4} /* return r4 */
|
||||
|
||||
Lmemcpy_fl32:
|
||||
adds r2, r2, #0x14
|
||||
adds r2, r2, #0x14
|
||||
|
||||
/* blat 12 bytes at a time */
|
||||
Lmemcpy_floop12:
|
||||
|
@ -174,9 +174,9 @@ Lmemcpy_fsrcul:
|
|||
cmp r12, #2
|
||||
bgt Lmemcpy_fsrcul3
|
||||
beq Lmemcpy_fsrcul2
|
||||
cmp r2, #0x0c
|
||||
cmp r2, #0x0c
|
||||
blt Lmemcpy_fsrcul1loop4
|
||||
sub r2, r2, #0x0c
|
||||
sub r2, r2, #0x0c
|
||||
stmdb sp!, {r4, r5}
|
||||
|
||||
Lmemcpy_fsrcul1loop16:
|
||||
|
@ -190,10 +190,10 @@ Lmemcpy_fsrcul1loop16:
|
|||
mov r12, r12, lsr #8
|
||||
orr r12, r12, lr, lsl #24
|
||||
stmia r0!, {r3-r5, r12}
|
||||
subs r2, r2, #0x10
|
||||
subs r2, r2, #0x10
|
||||
bge Lmemcpy_fsrcul1loop16
|
||||
ldmia sp!, {r4, r5}
|
||||
adds r2, r2, #0x0c
|
||||
adds r2, r2, #0x0c
|
||||
blt Lmemcpy_fsrcul1l4
|
||||
|
||||
Lmemcpy_fsrcul1loop4:
|
||||
|
@ -209,9 +209,9 @@ Lmemcpy_fsrcul1l4:
|
|||
b Lmemcpy_fl4
|
||||
|
||||
Lmemcpy_fsrcul2:
|
||||
cmp r2, #0x0c
|
||||
cmp r2, #0x0c
|
||||
blt Lmemcpy_fsrcul2loop4
|
||||
sub r2, r2, #0x0c
|
||||
sub r2, r2, #0x0c
|
||||
stmdb sp!, {r4, r5}
|
||||
|
||||
Lmemcpy_fsrcul2loop16:
|
||||
|
@ -225,10 +225,10 @@ Lmemcpy_fsrcul2loop16:
|
|||
mov r12, r12, lsr #16
|
||||
orr r12, r12, lr, lsl #16
|
||||
stmia r0!, {r3-r5, r12}
|
||||
subs r2, r2, #0x10
|
||||
subs r2, r2, #0x10
|
||||
bge Lmemcpy_fsrcul2loop16
|
||||
ldmia sp!, {r4, r5}
|
||||
adds r2, r2, #0x0c
|
||||
adds r2, r2, #0x0c
|
||||
blt Lmemcpy_fsrcul2l4
|
||||
|
||||
Lmemcpy_fsrcul2loop4:
|
||||
|
@ -244,9 +244,9 @@ Lmemcpy_fsrcul2l4:
|
|||
b Lmemcpy_fl4
|
||||
|
||||
Lmemcpy_fsrcul3:
|
||||
cmp r2, #0x0c
|
||||
cmp r2, #0x0c
|
||||
blt Lmemcpy_fsrcul3loop4
|
||||
sub r2, r2, #0x0c
|
||||
sub r2, r2, #0x0c
|
||||
stmdb sp!, {r4, r5}
|
||||
|
||||
Lmemcpy_fsrcul3loop16:
|
||||
|
@@ -260,10 +260,10 @@ Lmemcpy_fsrcul3loop16:
 	mov	r12, r12, lsr #24
 	orr	r12, r12, lr, lsl #8
 	stmia	r0!, {r3-r5, r12}
-	subs	r2, r2, #0x10 
+	subs	r2, r2, #0x10
 	bge	Lmemcpy_fsrcul3loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c 
+	adds	r2, r2, #0x0c
 	blt	Lmemcpy_fsrcul3l4

 Lmemcpy_fsrcul3loop4:
@@ -303,18 +303,18 @@ Lmemcpy_bloop32:
 	stmdb	r0!, {r3, r4, r12, lr}
 	ldmdb	r1!, {r3, r4, r12, lr}
 	stmdb	r0!, {r3, r4, r12, lr}
-	subs	r2, r2, #0x20 
+	subs	r2, r2, #0x20
 	bge	Lmemcpy_bloop32

 Lmemcpy_bl32:
-	cmn	r2, #0x10 
+	cmn	r2, #0x10
 	ldmdbge	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
 	stmdbge	r0!, {r3, r4, r12, lr}
-	subge	r2, r2, #0x10 
-	adds	r2, r2, #0x14 
+	subge	r2, r2, #0x10
+	adds	r2, r2, #0x14
 	ldmdbge	r1!, {r3, r12, lr}	/* blat a remaining 12 bytes */
 	stmdbge	r0!, {r3, r12, lr}
-	subge	r2, r2, #0x0c 
+	subge	r2, r2, #0x0c
 	ldmia	sp!, {r4}

 Lmemcpy_bl12:
@@ -366,9 +366,9 @@ Lmemcpy_bsrcul:
 	cmp	r12, #2
 	blt	Lmemcpy_bsrcul1
 	beq	Lmemcpy_bsrcul2
-	cmp	r2, #0x0c 
+	cmp	r2, #0x0c
 	blt	Lmemcpy_bsrcul3loop4
-	sub	r2, r2, #0x0c 
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}

 Lmemcpy_bsrcul3loop16:
@@ -382,10 +382,10 @@ Lmemcpy_bsrcul3loop16:
 	mov	r4, r4, lsl #8
 	orr	r4, r4, r3, lsr #24
 	stmdb	r0!, {r4, r5, r12, lr}
-	subs	r2, r2, #0x10 
+	subs	r2, r2, #0x10
 	bge	Lmemcpy_bsrcul3loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c 
+	adds	r2, r2, #0x0c
 	blt	Lmemcpy_bsrcul3l4

 Lmemcpy_bsrcul3loop4:
@@ -401,9 +401,9 @@ Lmemcpy_bsrcul3l4:
 	b	Lmemcpy_bl4

 Lmemcpy_bsrcul2:
-	cmp	r2, #0x0c 
+	cmp	r2, #0x0c
 	blt	Lmemcpy_bsrcul2loop4
-	sub	r2, r2, #0x0c 
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}

 Lmemcpy_bsrcul2loop16:
@@ -417,10 +417,10 @@ Lmemcpy_bsrcul2loop16:
 	mov	r4, r4, lsl #16
 	orr	r4, r4, r3, lsr #16
 	stmdb	r0!, {r4, r5, r12, lr}
-	subs	r2, r2, #0x10 
+	subs	r2, r2, #0x10
 	bge	Lmemcpy_bsrcul2loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c 
+	adds	r2, r2, #0x0c
 	blt	Lmemcpy_bsrcul2l4

 Lmemcpy_bsrcul2loop4:
@@ -436,9 +436,9 @@ Lmemcpy_bsrcul2l4:
 	b	Lmemcpy_bl4

 Lmemcpy_bsrcul1:
-	cmp	r2, #0x0c 
+	cmp	r2, #0x0c
 	blt	Lmemcpy_bsrcul1loop4
-	sub	r2, r2, #0x0c 
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}

 Lmemcpy_bsrcul1loop32:
@@ -452,10 +452,10 @@ Lmemcpy_bsrcul1loop32:
 	mov	r4, r4, lsl #24
 	orr	r4, r4, r3, lsr #8
 	stmdb	r0!, {r4, r5, r12, lr}
-	subs	r2, r2, #0x10 
+	subs	r2, r2, #0x10
 	bge	Lmemcpy_bsrcul1loop32
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c 
+	adds	r2, r2, #0x0c
 	blt	Lmemcpy_bsrcul1l4

 Lmemcpy_bsrcul1loop4:
@@ -469,4 +469,4 @@ Lmemcpy_bsrcul1loop4:
 Lmemcpy_bsrcul1l4:
 	add	r1, r1, #1
 	b	Lmemcpy_bl4
- 
+

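For orientation, the memmove hunks above split into a forwards copy (entered when the earlier `bcc Lmemcpy_backwards` does not branch) and a backwards copy for overlapping regions; these whitespace fixes touch both halves. A minimal C sketch of that direction choice, byte-at-a-time purely for clarity, since the 32-byte ldmia/stmia blocks are the optimized form of the same idea:

#include <stddef.h>

/* Sketch of the overlap-safe direction test: copy forwards when dst
 * is below src, otherwise copy backwards from the high end. */
static void *
memmove_sketch(void *dst0, const void *src0, size_t len)
{
	unsigned char *dst = dst0;
	const unsigned char *src = src0;

	if (dst < src) {
		while (len-- > 0)
			*dst++ = *src++;	/* forwards */
	} else {
		dst += len;
		src += len;
		while (len-- > 0)
			*--dst = *--src;	/* backwards */
	}
	return (dst0);
}
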
@@ -3,7 +3,7 @@
  * Written by Christian Weisgerber <naddy@openbsd.org>.
  * Public domain.
  */
- 
+

 #include <machine/asm.h>

 ENTRY(ffs)

@@ -80,7 +80,7 @@ memset(void *dst0, int c0, size_t length)
 	 *
 	 * but we use a minimum of 3 here since the overhead of the code
 	 * to do word writes is substantial.
-	 */ 
+	 */
 	if (length < 3 * wsize) {
 		while (length != 0) {
 			*dst++ = VAL;

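The memset hunk keeps the comment explaining the `3 * wsize` cutoff: below three words, aligning and issuing word stores costs more than it saves, so the routine just stores bytes. A small sketch of that fast path, assuming wsize is the word size as the surrounding code implies; names are illustrative:

#include <stddef.h>

/* Short fills take the plain byte loop; longer fills would fall
 * through to the aligned word-store loop. */
static void
memset_short_sketch(unsigned char *dst, unsigned char val, size_t length)
{
	const size_t wsize = sizeof(long);

	if (length < 3 * wsize) {
		while (length != 0) {
			*dst++ = val;
			length--;
		}
	}
}
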
@@ -41,7 +41,7 @@
  *
  *	u = 2^n u1 * u0	(n = number of bits in `u_int', usu. 32)
  *
- * and 
+ * and
  *
  *	v = 2^n v1 * v0
  *

@@ -176,7 +176,7 @@ __qdivrem(u_quad_t uq, u_quad_t vq, u_quad_t *arq)
 	v2 = v[2];	/* for D3 */
 	do {
 		digit uj0, uj1, uj2;
- 
+
 		/*
 		 * D3: Calculate qhat (\^q, in TeX notation).
 		 * Let qhat = min((u[j]*B + u[j+1])/v[1], B-1), and

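The __qdivrem hunk sits at step D3 of Knuth's Algorithm D, whose estimate the comment quotes verbatim: qhat = min((u[j]*B + u[j+1])/v[1], B-1). A hedged illustration of that estimate with base B = 2^16 digits, matching the split of quads into half-word digits; the helper name is hypothetical:

#include <stdint.h>

/* Knuth D3 quotient-digit estimate: divide the top two dividend
 * digits by the leading divisor digit, capped at B - 1. */
static uint32_t
qhat_estimate(uint16_t uj0, uint16_t uj1, uint16_t v1)
{
	const uint32_t B = 1U << 16;
	uint32_t num = ((uint32_t)uj0 << 16) | uj1;
	uint32_t qhat = num / v1;

	return (qhat < B - 1 ? qhat : B - 1);
}
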
@@ -43,10 +43,10 @@ strncasecmp(const char *s1, const char *s2, size_t n)
 		unsigned char c2 = (unsigned char) *s2++;

 		if (c1 != c2) {
-			if (c1 >= 'A' && c1 <= 'Z' && 
+			if (c1 >= 'A' && c1 <= 'Z' &&
 			    c2 >= 'a' && c2 <= 'z')
 				c1 += 'a' - 'A';
-			else if (c1 >= 'a' && c1 <= 'z' && 
+			else if (c1 >= 'a' && c1 <= 'z' &&
 			    c2 >= 'A' && c2 <= 'Z')
 				c2 += 'a' - 'A';
 			if (c1 != c2)

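The strncasecmp hunk folds ASCII case only when the bytes differ, nudging one side across the 'a' - 'A' (0x20) gap. A simplified, functionally equivalent sketch that folds both sides unconditionally; illustrative only, not the file's exact control flow:

#include <stddef.h>

/* ASCII-only case-insensitive compare of at most n bytes. */
int
strncasecmp_sketch(const char *s1, const char *s2, size_t n)
{
	while (n-- > 0) {
		unsigned char c1 = *s1++, c2 = *s2++;

		if (c1 >= 'A' && c1 <= 'Z')
			c1 += 'a' - 'A';
		if (c2 >= 'A' && c2 <= 'Z')
			c2 += 'a' - 'A';
		if (c1 != c2)
			return (c1 - c2);
		if (c1 == '\0')
			break;
	}
	return (0);
}
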
@@ -15,7 +15,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

-#include <sys/types.h> 
+#include <sys/types.h>

 #include <lib/libsa/arc4.h>

@@ -17,7 +17,7 @@

 #include <lib/libsa/stand.h>
 #include <sys/param.h>
- 
+
 #include "bcrypt_pbkdf.h"
 #include "blowfish.h"
 #include "sha2.h"

@@ -76,7 +76,7 @@ struct bootp {
  */
 #define VM_RFC1048	{ 99, 130, 83, 99 }

- 
+

 /*
  * RFC1048 tag values used to specify what information is being supplied in

@@ -40,7 +40,7 @@
 #define MARK_END	4
 #define MARK_RANDOM	5
 #define MARK_ERANDOM	6
-#define MARK_VENTRY	7 
+#define MARK_VENTRY	7
 #define MARK_MAX	8

 /*

@@ -176,7 +176,7 @@ reswitch:	switch (ch) {
 			}
 			kprintn64(put, ull, 10, width, padchar);
 			break;
-		} 
+		}
 #endif
 		ul = lflag ?
 		    va_arg(ap, long) : va_arg(ap, int);
@@ -192,7 +192,7 @@ reswitch:	switch (ch) {
 			ull = va_arg(ap, u_int64_t);
 			kprintn64(put, ull, 8, width, padchar);
 			break;
-		} 
+		}
 #endif
 		ul = lflag ?
 		    va_arg(ap, u_long) : va_arg(ap, u_int);
@@ -204,7 +204,7 @@ reswitch:	switch (ch) {
 			ull = va_arg(ap, u_int64_t);
 			kprintn64(put, ull, 10, width, padchar);
 			break;
-		} 
+		}
 #endif
 		ul = lflag ?
 		    va_arg(ap, u_long) : va_arg(ap, u_int);

@@ -4,7 +4,7 @@
  * SHA-1 in C
  * By Steve Reid <steve@edmweb.com>
  * 100% Public Domain
- * 
+ *
  * Test Vectors (from FIPS PUB 180-1)
  * "abc"
  *   A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D

@@ -3,7 +3,7 @@
 /*
  * FILE:	sha2.c
  * AUTHOR:	Aaron D. Gifford <me@aarongifford.com>
- * 
+ *
  * Copyright (c) 2000-2001, Aaron D. Gifford
  * All rights reserved.
  *
@@ -18,7 +18,7 @@
  * 3. Neither the name of the copyright holder nor the names of contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -76,7 +76,7 @@
  *
  * And for little-endian machines, add:
  *
- *   #define BYTE_ORDER LITTLE_ENDIAN 
+ *   #define BYTE_ORDER LITTLE_ENDIAN
  *
  * Or for big-endian machines:
  *
@@ -466,11 +466,11 @@ SHA256Transform(u_int32_t state[8], const u_int8_t data[SHA256_BLOCK_LENGTH])
 		/* Part of the message block expansion: */
 		s0 = W256[(j+1)&0x0f];
 		s0 = sigma0_256(s0);
-		s1 = W256[(j+14)&0x0f]; 
+		s1 = W256[(j+14)&0x0f];
 		s1 = sigma1_256(s1);

 		/* Apply the SHA-256 compression function to update a..h */
-		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + 
+		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
 		    (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
 		T2 = Sigma0_256(a) + Maj(a, b, c);
 		h = g;

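The SHA256Transform hunk is inside the message schedule: W256 is a rolling 16-word window, and `W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0` computes the FIPS 180 recurrence W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16] in place. A sketch with a flat 64-entry array, which may be easier to read; the macros follow the FIPS 180-4 small-sigma definitions:

#include <stdint.h>

#define ROTR(x, n)	(((x) >> (n)) | ((x) << (32 - (n))))
#define sigma0(x)	(ROTR(x, 7) ^ ROTR(x, 18) ^ ((x) >> 3))
#define sigma1(x)	(ROTR(x, 17) ^ ROTR(x, 19) ^ ((x) >> 10))

/* Expand one 16-word message block into the full 64-word schedule. */
static void
sha256_schedule_sketch(uint32_t W[64])
{
	int j;

	/* W[0..15] hold the message block; the rest are derived. */
	for (j = 16; j < 64; j++)
		W[j] = sigma1(W[j-2]) + W[j-7] + sigma0(W[j-15]) + W[j-16];
}
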
@@ -3,7 +3,7 @@
 /*
  * FILE:	sha2.h
  * AUTHOR:	Aaron D. Gifford <me@aarongifford.com>
- * 
+ *
  * Copyright (c) 2000-2001, Aaron D. Gifford
  * All rights reserved.
  *
@@ -18,7 +18,7 @@
  * 3. Neither the name of the copyright holder nor the names of contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

@@ -96,7 +96,7 @@ fusefs_lookup(void *v)
 		 */
 		if ((error = VOP_ACCESS(vdp, VWRITE, cred,
 		    cnp->cn_proc)) != 0)
-			return (error); 
+			return (error);

 		cnp->cn_flags |= SAVENAME;

@@ -224,7 +224,7 @@ filt_fusefsvnode(struct knote *kn, long int hint)

 /*
  * FUSE file systems can maintain a file handle for each VFS file descriptor
- * that is opened. The OpenBSD VFS does not make file descriptors visible to 
+ * that is opened. The OpenBSD VFS does not make file descriptors visible to
  * us so we fake it by mapping open flags to file handles.
  * There is no way for FUSE to know which file descriptor is being used
  * by an application for a file operation. We only maintain 3 descriptors,
@@ -360,7 +360,7 @@ fusefs_access(void *v)
 	ip = VTOI(ap->a_vp);
 	fmp = (struct fusefs_mnt *)ip->ufs_ino.i_ump;

-	/* 
+	/*
 	 * Only user that mounted the file system can access it unless
 	 * allow_other mount option was specified.
 	 */
@@ -411,7 +411,7 @@ fusefs_getattr(void *v)
 	ip = VTOI(vp);
 	fmp = (struct fusefs_mnt *)ip->ufs_ino.i_ump;

-	/* 
+	/*
 	 * Only user that mounted the file system can access it unless
 	 * allow_other mount option was specified. Return dummy values
 	 * for the root inode in this situation.
@@ -853,7 +853,7 @@ fusefs_inactive(void *v)

 	/*
 	 * FUSE file systems expect the same flags to be sent
-	 * on release that were sent on open. We don't have a 
+	 * on release that were sent on open. We don't have a
 	 * record of them so make a best guess.
 	 */
 	switch (type) {

@@ -356,7 +356,7 @@ if_idxmap_alloc(struct ifnet *ifp)
 	if_map = mallocarray(limit, sizeof(*if_map), M_IFADDR,
 	    M_WAITOK | M_ZERO);
 	if_map[0] = (struct ifnet *)(uintptr_t)limit;
- 
+
 	for (i = 1; i < olimit; i++) {
 		struct ifnet *oifp = SMR_PTR_GET_LOCKED(&oif_map[i]);
 		if (oifp == NULL)
@@ -3506,6 +3506,6 @@ net_tq_barriers(const char *wmesg)
 		refcnt_take(&r);
 		task_add(softnets[i].sn_taskq, &barriers[i]);
 	}
- 
+
 	refcnt_finalize(&r, wmesg);
 }

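net_tq_barriers() above uses the refcnt barrier idiom: take one reference per task queue, queue a barrier task on each, and let refcnt_finalize() sleep until every task has dropped its reference. The hunk only shows the waiting side; a hedged sketch of what each queued barrier task presumably does, where the task body is an assumption and the refcnt call is the documented OpenBSD API:

#include <sys/refcnt.h>

/* Runs on a task queue; reaching this point means the queue has
 * drained up to the barrier, so release our reference and wake the
 * waiter blocked in refcnt_finalize(). */
void
barrier_task(void *arg)
{
	struct refcnt *r = arg;

	refcnt_rele_wake(r);
}
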
@@ -219,7 +219,7 @@ etherip_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
 			return (EIO);
 		}
 	}
- 
+
 	return (ether_output(ifp, m, dst, rt));
 }

@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.168 2023/05/30 08:30:01 jsg Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.169 2023/06/20 16:30:30 cheloha Exp $ */
 /* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */

 /*
@@ -414,7 +414,7 @@ void			uvmspace_free(struct vmspace *);
 struct vmspace		*uvmspace_share(struct process *);
 int			uvm_share(vm_map_t, vaddr_t, vm_prot_t,
 			    vm_map_t, vaddr_t, vsize_t);
-void			uvm_meter(void);
+void			uvm_meter(void *);
 int			uvm_sysctl(int *, u_int, void *, size_t *,
 			    void *, size_t, struct proc *);
 struct vm_page		*uvm_pagealloc(struct uvm_object *,

@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_meter.c,v 1.42 2020/12/28 14:01:23 mpi Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.43 2023/06/20 16:30:30 cheloha Exp $ */
 /* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */

 /*
@@ -65,6 +65,9 @@
 int maxslp = MAXSLP;	/* patchable ... */
 struct loadavg averunnable;

+#define UVM_METER_INTVL	5
+struct timeout uvm_meter_to = TIMEOUT_INITIALIZER(uvm_meter, NULL);
+
 /*
  * constants for averages over 1, 5, and 15 minutes when sampling at
  * 5 second intervals.
@@ -82,15 +85,13 @@ void uvm_total(struct vmtotal *);
 void uvmexp_read(struct uvmexp *);

 /*
- * uvm_meter: calculate load average and wake up the swapper (if needed)
+ * uvm_meter: recompute load averages
  */
 void
-uvm_meter(void)
+uvm_meter(void *unused)
 {
-	if ((gettime() % 5) == 0)
-		uvm_loadav(&averunnable);
-	if (proc0.p_slptime > (maxslp / 2))
-		wakeup(&proc0);
+	timeout_add_sec(&uvm_meter_to, UVM_METER_INTVL);
+	uvm_loadav(&averunnable);
 }

 /*
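The uvm_meter() change replaces the old once-per-five-seconds check, which relied on an external caller and `(gettime() % 5) == 0`, with a self-rearming timeout: UVM_METER_INTVL and the TIMEOUT_INITIALIZER above, plus timeout_add_sec() from the handler itself. A minimal sketch of that pattern with hypothetical names; the timeout calls are the documented kernel API:

#include <sys/types.h>
#include <sys/timeout.h>

#define DEMO_INTVL	5

void	demo_tick(void *);

struct timeout demo_to = TIMEOUT_INITIALIZER(demo_tick, NULL);

/* Re-arm first, then do the periodic work, so the next run is
 * scheduled even if the work itself takes a while. */
void
demo_tick(void *unused)
{
	timeout_add_sec(&demo_to, DEMO_INTVL);
	/* ... periodic work goes here ... */
}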