purplerain 2023-06-20 20:38:03 +00:00
parent 451579e149
commit a2dd1eda92
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
89 changed files with 1343 additions and 775 deletions

View file

@@ -1,4 +1,4 @@
-/* $OpenBSD: dwmshc.c,v 1.4 2023/04/19 02:01:02 dlg Exp $ */
+/* $OpenBSD: dwmshc.c,v 1.5 2023/06/20 09:26:36 kettenis Exp $ */
/*
* Copyright (c) 2023 David Gwynne <dlg@openbsd.org>
@@ -98,7 +98,7 @@
#define EMMC_DLL_RXCLK_RX_CLK_OUT_SEL (1U << 27)
#define EMMC_DLL_RXCLK_RX_CLK_CHANGE_WINDOW (1U << 28)
#define EMMC_DLL_RXCLK_RX_CLK_SRC_SEL (1U << 29)
-#define EMMC_DLL_TXCLK 0x804
+#define EMMC_DLL_TXCLK 0x808
#define EMMC_DLL_TXCLK_TX_TAP_NUM_SHIFT 0
#define EMMC_DLL_TXCLK_TX_TAP_NUM_MASK 0x1f
#define EMMC_DLL_TXCLK_TX_TAP_VALUE_SHIFT 8

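A note on the dwmshc change above: 0x804 is the EMMC_DLL_RXCLK offset, so with the old define every write aimed at the TX delay line clobbered the RX clock register instead. A minimal sketch of how such a define is consumed, assuming the usual OpenBSD bus_space pattern; the helper and the sc_iot/sc_ioh softc fields are illustrative, not the driver's actual code:

/*
 * Illustrative only: programming a TX tap through the corrected
 * define.  With the old 0x804 offset, the final write would have
 * landed on EMMC_DLL_RXCLK.
 */
static void
dwmshc_set_tx_tap(struct dwmshc_softc *sc, uint32_t tap)
{
	uint32_t v;

	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, EMMC_DLL_TXCLK);
	v &= ~(EMMC_DLL_TXCLK_TX_TAP_NUM_MASK <<
	    EMMC_DLL_TXCLK_TX_TAP_NUM_SHIFT);
	v |= (tap & EMMC_DLL_TXCLK_TX_TAP_NUM_MASK) <<
	    EMMC_DLL_TXCLK_TX_TAP_NUM_SHIFT;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, EMMC_DLL_TXCLK, v);
}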
View file

@@ -267,7 +267,7 @@ typedef enum {
#define AAC_ERROR_FIB_DEALLOCATION_FAILED 0x08
/*
-* Adapter Init Structure: this is passed to the adapter with the
+* Adapter Init Structure: this is passed to the adapter with the
* AAC_MONKER_INITSTRUCT command to point it at our control structures.
*/
struct aac_adapter_init {
@@ -423,7 +423,7 @@ typedef enum {
CPU_MIPS,
CPU_XSCALE,
CPU__last
-} AAC_CpuType;
+} AAC_CpuType;
typedef enum {
CPUI960_JX = 1,
@@ -481,7 +481,7 @@ typedef enum {
* XXX the aac-2622 with no battery present reports PLATFORM_BAT_OPT_PRESENT
*/
typedef enum
-{
+{
PLATFORM_BAT_REQ_PRESENT = 1, /* BATTERY REQUIRED AND PRESENT */
PLATFORM_BAT_REQ_NOTPRESENT, /* BATTERY REQUIRED AND NOT PRESENT */
PLATFORM_BAT_OPT_PRESENT, /* BATTERY OPTIONAL AND PRESENT */
@@ -489,9 +489,9 @@ typedef enum
PLATFORM_BAT_NOT_SUPPORTED /* BATTERY NOT SUPPORTED */
} AAC_BatteryPlatform;
-/*
+/*
* options supported by this board
-* there has to be a one to one mapping of these defines and the ones in
+* there has to be a one to one mapping of these defines and the ones in
* fsaapi.h, search for FSA_SUPPORT_SNAPSHOT
*/
#define AAC_SUPPORTED_SNAPSHOT 0x01
@@ -508,7 +508,7 @@ typedef enum
#define AAC_SUPPORTED_ALARM 0x800
#define AAC_SUPPORTED_NONDASD 0x1000
-/*
+/*
* Structure used to respond to a RequestAdapterInfo fib.
*/
struct aac_adapter_info {
@@ -564,7 +564,7 @@ struct aac_adapter_info {
#define AAC_KERNEL_PANIC 0x00000100
/*
-* Data types relating to control and monitoring of the NVRAM/WriteCache
+* Data types relating to control and monitoring of the NVRAM/WriteCache
* subsystem.
*/
@@ -732,7 +732,7 @@ typedef enum {
AifEnGeneric = 1, /* Generic notification */
AifEnTaskComplete, /* Task has completed */
AifEnConfigChange, /* Adapter config change occurred */
-AifEnContainerChange, /* Adapter specific container
+AifEnContainerChange, /* Adapter specific container
* configuration change */
AifEnDeviceFailure, /* SCSI device failed */
AifEnMirrorFailover, /* Mirror failover started */
@@ -832,7 +832,7 @@ struct aac_AifEventNotify {
/*
* Adapter Initiated FIB command structures. Start with the adapter
* initiated FIBs that really come from the adapter, and get responded
-* to by the host.
+* to by the host.
*/
#define AAC_AIF_REPORT_MAX_SIZE 64
@@ -1378,7 +1378,7 @@ enum {
/*
* The adapter can request the host print a message by setting the
* DB_PRINTF flag in DOORBELL0. The driver responds by collecting the
-* message from the printf buffer, clearing the DB_PRINTF flag in
+* message from the printf buffer, clearing the DB_PRINTF flag in
* DOORBELL0 and setting it in DOORBELL1.
* (ODBR and IDBR respectively for the i960Rx adapters)
*/

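The DB_PRINTF comment above describes a strict three-step handshake. A hedged sketch of that sequence in C; the accessor macros and doorbell register names here are assumed stand-ins, not necessarily the driver's real ones:

/*
 * Sketch of the handshake described above (names are stand-ins).
 */
static void
aac_print_printf(struct aac_softc *sc)
{
	if ((AAC_GET_REG4(sc, AAC_ODBR) & AAC_DB_PRINTF) == 0)
		return;

	/* 1. collect the message the adapter placed in ac_printf */
	sc->aac_common->ac_printf[AAC_PRINTF_BUFSIZE - 1] = '\0';
	printf("%s: %s", sc->aac_dev.dv_xname, sc->aac_common->ac_printf);

	/* 2. clear DB_PRINTF in DOORBELL0, 3. set it in DOORBELL1 */
	AAC_SET_REG4(sc, AAC_ODBR, AAC_DB_PRINTF);
	AAC_SET_REG4(sc, AAC_IDBR, AAC_DB_PRINTF);
}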
View file

@@ -127,10 +127,10 @@ struct aac_softc;
#define AAC_SYNC_DELAY 20000
/*
-* The firmware interface allows for a 16-bit s/g list length. We limit
+* The firmware interface allows for a 16-bit s/g list length. We limit
* ourselves to a reasonable maximum and ensure alignment.
*/
-#define AAC_MAXSGENTRIES 64 /* max S/G entries, limit 65535 */
+#define AAC_MAXSGENTRIES 64 /* max S/G entries, limit 65535 */
/*
* We gather a number of adapter-visible items into a single structure.
@@ -157,7 +157,7 @@ struct aac_common {
/* buffer for text messages from the controller */
char ac_printf[AAC_PRINTF_BUFSIZE];
/* fib for synchronous commands */
struct aac_fib ac_sync_fib;
};
@@ -348,7 +348,7 @@ struct aac_softc
/* command management */
struct mutex aac_free_mtx;
-TAILQ_HEAD(,aac_command) aac_free; /* command structures
+TAILQ_HEAD(,aac_command) aac_free; /* command structures
* available for reuse */
TAILQ_HEAD(,aac_command) aac_ready; /* commands on hold for
* controller resources */

View file

@@ -989,7 +989,7 @@ encapped:
} else
rate = ni->ni_rates.rs_rates[ni->ni_txrate];
rate &= IEEE80211_RATE_VAL;
-}
+}
#if NBPFILTER > 0
if (ic->ic_rawbpf != NULL)
@@ -2375,7 +2375,7 @@ acx_set_probe_resp_tmplt(struct acx_softc *sc, struct ieee80211_node *ni)
m_copydata(m, 0, m->m_pkthdr.len, &resp.data);
len = m->m_pkthdr.len + sizeof(resp.size);
-m_freem(m);
+m_freem(m);
return (acx_set_tmplt(sc, ACXCMD_TMPLT_PROBE_RESP, &resp, len));
}

View file

@@ -36,10 +36,10 @@
*/
/*
* advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
-*
+*
* Copyright (c) 1995-1996 Advanced System Products, Inc.
* All Rights Reserved.
-*
+*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistributions of source
* code retain the above copyright notice and this comment without

View file

@@ -553,7 +553,7 @@ aic_poll(struct aic_softc *sc, struct scsi_xfer *xs, int count)
}
return 1;
}
/*
* LOW LEVEL SCSI UTILITIES
*/
@@ -684,7 +684,7 @@ abort:
aic_sched_msgout(sc, SEND_ABORT);
return (1);
}
/*
* Schedule a SCSI operation. This has now been pulled out of the interrupt
* handler so that we may call it from aic_scsi_cmd and aic_done. This may
@@ -726,7 +726,7 @@ aic_sched(struct aic_softc *sc)
bus_space_write_1(iot, ioh, SIMODE1, ENSCSIRST);
bus_space_write_1(iot, ioh, SCSISEQ, ENRESELI);
}
void
aic_sense(struct aic_softc *sc, struct aic_acb *acb)
{
@@ -831,7 +831,7 @@ aic_dequeue(struct aic_softc *sc, struct aic_acb *acb)
TAILQ_REMOVE(&sc->ready_list, acb, chain);
}
}
/*
* INTERRUPT/PROTOCOL ENGINE
*/
@@ -1305,7 +1305,7 @@ out:
/* Disable REQ/ACK protocol. */
bus_space_write_1(iot, ioh, SXFRCTL0, CHEN);
}
/* aic_dataout_pio: perform a data transfer using the FIFO datapath in the aic6360
* Precondition: The SCSI bus should be in the DOUT phase, with REQ asserted
* and ACK deasserted (i.e. waiting for a data byte).
@@ -1446,7 +1446,7 @@ phasechange:
return out;
}
/* aic_datain_pio: perform data transfers using the FIFO datapath in the aic6360
* Precondition: The SCSI bus should be in the DIN phase, with REQ asserted
* and ACK deasserted (i.e. at least one byte is ready).
@@ -1569,7 +1569,7 @@ phasechange:
return in;
}
/*
* This is the workhorse routine of the driver.
* Deficiencies (for now):
@@ -1992,7 +1992,7 @@ aic_timeout(void *arg)
splx(s);
}
#ifdef AIC_DEBUG
/*
* The following functions are mostly used for debugging purposes, either

View file

@@ -825,7 +825,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
if (scb != NULL)
ahc_set_transaction_status(scb,
CAM_UNCOR_PARITY);
-ahc_reset_channel(ahc, devinfo.channel,
+ahc_reset_channel(ahc, devinfo.channel,
/*init reset*/TRUE);
}
} else {

View file

@@ -823,7 +823,7 @@ struct seeprom_config {
#define CFBIOS_BUSSCAN 0x0008 /* Have the BIOS Scan the Bus */
#define CFSM2DRV 0x0010 /* support more than two drives */
#define CFSTPWLEVEL 0x0010 /* Termination level control */
-#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
+#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
#define CFTERM_MENU 0x0040 /* BIOS displays termination menu */
#define CFEXTEND 0x0080 /* extended translation enabled */

View file

@@ -80,9 +80,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
struct amdgpu_bo_vm *vmbo;
+bo = shadow_bo->parent;
vmbo = to_amdgpu_bo_vm(bo);
/* in case amdgpu_device_recover_vram got NULL of bo->parent */
if (!list_empty(&vmbo->shadow_list)) {
@@ -693,11 +694,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
-/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
-* is initialized.
-*/
-bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
@@ -714,6 +710,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
+vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
+vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
mutex_unlock(&adev->shadow_list_lock);
}

View file

@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
-(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
amdgpu_bo_add_to_shadow_list(*vmbo);
return 0;

View file

@@ -806,7 +806,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct drm_buddy *mm = &mgr->mm;
-struct drm_buddy_block *block;
+struct amdgpu_vram_reservation *rsv;
drm_printf(printer, " vis usage:%llu\n",
amdgpu_vram_mgr_vis_usage(mgr));
@@ -818,8 +818,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
drm_buddy_print(mm, printer);
drm_printf(printer, "reserved:\n");
-list_for_each_entry(block, &mgr->reserved_pages, link)
-drm_buddy_block_print(mm, block, printer);
+list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
+drm_printf(printer, "%#018llx-%#018llx: %llu\n",
+rsv->start, rsv->start + rsv->size, rsv->size);
mutex_unlock(&mgr->lock);
}

View file

@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
-if (adev->flags & AMD_IS_APU)
-return reference_clock;
+if (adev->flags & AMD_IS_APU) {
+switch (adev->asic_type) {
+case CHIP_STONEY:
+/* vbios says 48Mhz, but the actual freq is 100Mhz */
+return 10000;
+default:
+return reference_clock;
+}
+}
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))

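One detail of the Stoney quirk above: returning 10000 for a 100 MHz clock only makes sense if xclk is kept in 10 kHz units (apparently the same units as the SPLL reference frequency); otherwise 48 MHz would have been written as 4800. A one-line illustration of that assumed unit, not a documented fact:

/* Assumed unit: 10000 * 10 kHz == 100 MHz, which is why the quirk
 * returns 10000 rather than 100000000. */
static inline u32 xclk_units_to_khz(u32 xclk)
{
	return xclk * 10;	/* 10000 -> 100000 kHz (100 MHz) */
}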
View file

@@ -137,7 +137,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-.pct_ideal_sdp_bw_after_urgent = 100.0,
+.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented

View file

@@ -2067,33 +2067,96 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
+static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+uint32_t *gen_speed_override,
+uint32_t *lane_width_override)
+{
+struct amdgpu_device *adev = smu->adev;
+*gen_speed_override = 0xff;
+*lane_width_override = 0xff;
+switch (adev->pdev->device) {
+case 0x73A0:
+case 0x73A1:
+case 0x73A2:
+case 0x73A3:
+case 0x73AB:
+case 0x73AE:
+/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
+*lane_width_override = 6;
+break;
+case 0x73E0:
+case 0x73E1:
+case 0x73E3:
+*lane_width_override = 4;
+break;
+case 0x7420:
+case 0x7421:
+case 0x7422:
+case 0x7423:
+case 0x7424:
+*lane_width_override = 3;
+break;
+default:
+break;
+}
+}
+#ifndef MAX
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-uint32_t smu_pcie_arg;
+struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+uint32_t gen_speed_override, lane_width_override;
uint8_t *table_member1, *table_member2;
+uint32_t min_gen_speed, max_gen_speed;
+uint32_t min_lane_width, max_lane_width;
+uint32_t smu_pcie_arg;
int ret, i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
-/* lclk dpm table setup */
-for (i = 0; i < MAX_PCIE_CONF; i++) {
-dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
-dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
+sienna_cichlid_get_override_pcie_settings(smu,
+&gen_speed_override,
+&lane_width_override);
+/* PCIE gen speed override */
+if (gen_speed_override != 0xff) {
+min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+} else {
+min_gen_speed = MAX(0, table_member1[0]);
+max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+min_gen_speed = min_gen_speed > max_gen_speed ?
+max_gen_speed : min_gen_speed;
+}
+pcie_table->pcie_gen[0] = min_gen_speed;
+pcie_table->pcie_gen[1] = max_gen_speed;
+/* PCIE lane width override */
+if (lane_width_override != 0xff) {
+min_lane_width = MIN(pcie_width_cap, lane_width_override);
+max_lane_width = MIN(pcie_width_cap, lane_width_override);
+} else {
+min_lane_width = MAX(1, table_member2[0]);
+max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+min_lane_width = min_lane_width > max_lane_width ?
+max_lane_width : min_lane_width;
+}
+pcie_table->pcie_lane[0] = min_lane_width;
+pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
-smu_pcie_arg = (i << 16) |
-((table_member1[i] <= pcie_gen_cap) ?
-(table_member1[i] << 8) :
-(pcie_gen_cap << 8)) |
-((table_member2[i] <= pcie_width_cap) ?
-table_member2[i] :
-pcie_width_cap);
+smu_pcie_arg = (i << 16 |
+pcie_table->pcie_gen[i] << 8 |
+pcie_table->pcie_lane[i]);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
@@ -2101,11 +2164,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
NULL);
if (ret)
return ret;
-if (table_member1[i] > pcie_gen_cap)
-dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
-if (table_member2[i] > pcie_width_cap)
-dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
}
return 0;

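The override path added above pins both ends of the PCIe DPM range to the same value (min == max == MIN(cap, override)), which effectively disables link speed/width switching on the listed device IDs; without an override, the pptable range is simply clamped to the platform caps. A standalone restatement of that clamp, with MIN/MAX defined locally (illustrative, not driver code):

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static void clamp_pcie_range(unsigned cap, unsigned override,
			     unsigned tbl_min, unsigned tbl_max,
			     unsigned *min_out, unsigned *max_out)
{
	if (override != 0xff) {			/* 0xff means "no override" */
		*min_out = MIN(cap, override);
		*max_out = MIN(cap, override);	/* min == max: link pinned */
	} else {
		*min_out = MAX(0, tbl_min);
		*max_out = MIN(cap, tbl_max);
		if (*min_out > *max_out)
			*min_out = *max_out;
	}
}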
View file

@@ -582,11 +582,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
-smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
+smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
-smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
+smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
return 0;
}

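The fix above is a classic sizeof mismatch: the buffer was allocated and sized as a dpm_context but used as a power context. The usual kernel idiom that makes this class of bug impossible is to take the size from the destination pointer rather than naming the type twice; a short sketch of that idiom (illustrative, not the file's actual code):

/* Sizing the allocation from the pointer keeps type and size in sync. */
struct smu_13_0_power_context *ctx;

ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
	return -ENOMEM;
smu_power->power_context = ctx;
smu_power->power_context_size = sizeof(*ctx);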
View file

@@ -119,6 +119,32 @@ static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
+static int intel_dp_aux_sync_len(void)
+{
+int precharge = 16; /* 10-16 */
+int preamble = 16;
+return precharge + preamble;
+}
+static int intel_dp_aux_fw_sync_len(void)
+{
+int precharge = 10; /* 10-16 */
+int preamble = 8;
+return precharge + preamble;
+}
+static int g4x_dp_aux_precharge_len(void)
+{
+int precharge_min = 10;
+int preamble = 16;
+/* HW wants the length of the extra precharge in 2us units */
+return (intel_dp_aux_sync_len() -
+precharge_min - preamble) / 2;
+}
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
@@ -141,7 +167,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-(3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+(g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
@@ -165,8 +191,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
-DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
+DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;

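The arithmetic behind the helpers above: the SKL sync length (16 + 16 = 32) and the g4x precharge field ((32 - 10 - 16) / 2 = 3) reproduce the old hard-coded values exactly; the one functional change is the fast-wake sync length, which drops from the hard-coded 24 to 10 + 8 = 18. A tiny self-contained check of that arithmetic:

#include <assert.h>

int main(void)
{
	int sync_len = 16 + 16;		/* precharge + preamble */
	int fw_sync_len = 10 + 8;	/* fast-wake precharge + preamble */
	int precharge_2us = (sync_len - 10 - 16) / 2;

	assert(sync_len == 32);		/* matches old SYNC_PULSE_SKL(32) */
	assert(fw_sync_len == 18);	/* old value was 24: the real change */
	assert(precharge_2us == 3);	/* matches the old "3 <<" field */
	return 0;
}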
View file

@@ -179,97 +179,108 @@ out_file:
}
struct parallel_switch {
-struct task_struct *tsk;
+struct kthread_worker *worker;
+struct kthread_work work;
struct intel_context *ce[2];
+int result;
};
-static int __live_parallel_switch1(void *data)
+static void __live_parallel_switch1(struct kthread_work *work)
{
-struct parallel_switch *arg = data;
+struct parallel_switch *arg =
+container_of(work, typeof(*arg), work);
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
+arg->result = 0;
do {
struct i915_request *rq = NULL;
-int err, n;
+int n;
-err = 0;
-for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
+for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
struct i915_request *prev = rq;
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq)) {
i915_request_put(prev);
-return PTR_ERR(rq);
+arg->result = PTR_ERR(rq);
+break;
}
i915_request_get(rq);
if (prev) {
-err = i915_request_await_dma_fence(rq, &prev->fence);
+arg->result =
+i915_request_await_dma_fence(rq,
+&prev->fence);
i915_request_put(prev);
}
i915_request_add(rq);
}
+if (IS_ERR_OR_NULL(rq))
+break;
if (i915_request_wait(rq, 0, HZ) < 0)
-err = -ETIME;
+arg->result = -ETIME;
i915_request_put(rq);
-if (err)
-return err;
count++;
-} while (!__igt_timeout(end_time, NULL));
+} while (!arg->result && !__igt_timeout(end_time, NULL));
-pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
-return 0;
+pr_info("%s: %lu switches (sync) <%d>\n",
+arg->ce[0]->engine->name, count, arg->result);
}
-static int __live_parallel_switchN(void *data)
+static void __live_parallel_switchN(struct kthread_work *work)
{
-struct parallel_switch *arg = data;
+struct parallel_switch *arg =
+container_of(work, typeof(*arg), work);
struct i915_request *rq = NULL;
IGT_TIMEOUT(end_time);
unsigned long count;
int n;
count = 0;
+arg->result = 0;
do {
-for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
struct i915_request *prev = rq;
-int err = 0;
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq)) {
i915_request_put(prev);
-return PTR_ERR(rq);
+arg->result = PTR_ERR(rq);
+break;
}
i915_request_get(rq);
if (prev) {
-err = i915_request_await_dma_fence(rq, &prev->fence);
+arg->result =
+i915_request_await_dma_fence(rq,
+&prev->fence);
i915_request_put(prev);
}
i915_request_add(rq);
-if (err) {
-i915_request_put(rq);
-return err;
-}
}
count++;
-} while (!__igt_timeout(end_time, NULL));
-i915_request_put(rq);
+} while (!arg->result && !__igt_timeout(end_time, NULL));
-pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
-return 0;
+if (!IS_ERR_OR_NULL(rq))
+i915_request_put(rq);
+pr_info("%s: %lu switches (many) <%d>\n",
+arg->ce[0]->engine->name, count, arg->result);
}
static int live_parallel_switch(void *arg)
{
struct drm_i915_private *i915 = arg;
-static int (* const func[])(void *arg) = {
+static void (* const func[])(struct kthread_work *) = {
__live_parallel_switch1,
__live_parallel_switchN,
NULL,
@@ -277,7 +288,7 @@ static int live_parallel_switch(void *arg)
struct parallel_switch *data = NULL;
struct i915_gem_engines *engines;
struct i915_gem_engines_iter it;
-int (* const *fn)(void *arg);
+void (* const *fn)(struct kthread_work *);
struct i915_gem_context *ctx;
struct intel_context *ce;
struct file *file;
@@ -335,8 +346,10 @@ static int live_parallel_switch(void *arg)
continue;
ce = intel_context_create(data[m].ce[0]->engine);
-if (IS_ERR(ce))
+if (IS_ERR(ce)) {
+err = PTR_ERR(ce);
goto out;
+}
err = intel_context_pin(ce);
if (err) {
@@ -348,9 +361,24 @@ static int live_parallel_switch(void *arg)
}
}
+for (n = 0; n < count; n++) {
+struct kthread_worker *worker;
+if (!data[n].ce[0])
+continue;
+worker = kthread_create_worker(0, "igt/parallel:%s",
+data[n].ce[0]->engine->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
+goto out;
+}
+data[n].worker = worker;
+}
for (fn = func; !err && *fn; fn++) {
struct igt_live_test t;
int n;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@@ -360,34 +388,23 @@ static int live_parallel_switch(void *arg)
if (!data[n].ce[0])
continue;
-data[n].tsk = kthread_run(*fn, &data[n],
-"igt/parallel:%s",
-data[n].ce[0]->engine->name);
-if (IS_ERR(data[n].tsk)) {
-err = PTR_ERR(data[n].tsk);
-break;
-}
-get_task_struct(data[n].tsk);
+data[n].result = 0;
+kthread_init_work(&data[n].work, *fn);
+kthread_queue_work(data[n].worker, &data[n].work);
}
-yield(); /* start all threads before we kthread_stop() */
for (n = 0; n < count; n++) {
-int status;
-if (IS_ERR_OR_NULL(data[n].tsk))
-continue;
-status = kthread_stop(data[n].tsk);
-if (status && !err)
-err = status;
-put_task_struct(data[n].tsk);
-data[n].tsk = NULL;
+if (data[n].ce[0]) {
+kthread_flush_work(&data[n].work);
+if (data[n].result && !err)
+err = data[n].result;
+}
}
-if (igt_live_test_end(&t))
-err = -EIO;
+if (igt_live_test_end(&t)) {
+err = err ?: -EIO;
+break;
+}
}
out:
@@ -399,6 +416,9 @@ out:
intel_context_unpin(data[n].ce[m]);
intel_context_put(data[n].ce[m]);
}
+if (data[n].worker)
+kthread_destroy_worker(data[n].worker);
}
kfree(data);
out_file:

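This selftest rewrite, and the ones in the files that follow, all apply the same mechanical conversion: a raw kthread that returned a status becomes a kthread_work whose handler stores its status in a result field, with an explicit stop flag replacing kthread_should_stop(). A reduced sketch of the pattern (names are illustrative; the kthread_worker API calls are the real kernel ones):

#include <linux/kthread.h>

struct my_job {
	struct kthread_worker *worker;
	struct kthread_work work;
	bool stop;
	int result;
};

static void my_job_fn(struct kthread_work *work)
{
	struct my_job *job = container_of(work, typeof(*job), work);

	while (!READ_ONCE(job->stop))
		cond_resched();
	job->result = 0;	/* the old kthread return value, relocated */
}

static int run_my_job(struct my_job *job)
{
	job->worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(job->worker))
		return PTR_ERR(job->worker);

	kthread_init_work(&job->work, my_job_fn);
	kthread_queue_work(job->worker, &job->work);

	/* ... let it run ... */

	WRITE_ONCE(job->stop, true);
	kthread_flush_work(&job->work);		/* wait for the handler */
	kthread_destroy_worker(job->worker);
	return job->result;
}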
View file

@@ -1532,8 +1532,8 @@ static int live_busywait_preempt(void *arg)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
-int err = -ENOMEM;
u32 *map;
+int err;
/*
* Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@@ -1541,13 +1541,17 @@ static int live_busywait_preempt(void *arg)
*/
ctx_hi = kernel_context(gt->i915, NULL);
-if (!ctx_hi)
-return -ENOMEM;
+if (IS_ERR(ctx_hi))
+return PTR_ERR(ctx_hi);
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
-if (!ctx_lo)
+if (IS_ERR(ctx_lo)) {
+err = PTR_ERR(ctx_lo);
goto err_ctx_hi;
+}
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@@ -3475,12 +3479,14 @@ static int random_priority(struct rnd_state *rnd)
struct preempt_smoke {
struct intel_gt *gt;
+struct kthread_work work;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
unsigned int ncontext;
struct rnd_state prng;
unsigned long count;
+int result;
};
static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
@@ -3540,34 +3546,31 @@ unpin:
return err;
}
-static int smoke_crescendo_thread(void *arg)
+static void smoke_crescendo_work(struct kthread_work *work)
{
-struct preempt_smoke *smoke = arg;
+struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
struct i915_gem_context *ctx = smoke_context(smoke);
-int err;
-err = smoke_submit(smoke,
-ctx, count % I915_PRIORITY_MAX,
-smoke->batch);
-if (err)
-return err;
+smoke->result = smoke_submit(smoke, ctx,
+count % I915_PRIORITY_MAX,
+smoke->batch);
count++;
-} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+} while (!smoke->result && count < smoke->ncontext &&
+!__igt_timeout(end_time, NULL));
smoke->count = count;
-return 0;
}
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
-struct task_struct *tsk[I915_NUM_ENGINES] = {};
+struct kthread_worker *worker[I915_NUM_ENGINES] = {};
struct preempt_smoke *arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -3578,6 +3581,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
if (!arg)
return -ENOMEM;
+memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
for_each_engine(engine, smoke->gt, id) {
arg[id] = *smoke;
arg[id].engine = engine;
@@ -3585,31 +3590,28 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
arg[id].batch = NULL;
arg[id].count = 0;
-tsk[id] = kthread_run(smoke_crescendo_thread, arg,
-"igt/smoke:%d", id);
-if (IS_ERR(tsk[id])) {
-err = PTR_ERR(tsk[id]);
+worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+if (IS_ERR(worker[id])) {
+err = PTR_ERR(worker[id]);
break;
}
-get_task_struct(tsk[id]);
-}
-yield(); /* start all threads before we kthread_stop() */
+kthread_init_work(&arg[id].work, smoke_crescendo_work);
+kthread_queue_work(worker[id], &arg[id].work);
+}
count = 0;
for_each_engine(engine, smoke->gt, id) {
-int status;
-if (IS_ERR_OR_NULL(tsk[id]))
+if (IS_ERR_OR_NULL(worker[id]))
continue;
-status = kthread_stop(tsk[id]);
-if (status && !err)
-err = status;
+kthread_flush_work(&arg[id].work);
+if (arg[id].result && !err)
+err = arg[id].result;
count += arg[id].count;
-put_task_struct(tsk[id]);
+kthread_destroy_worker(worker[id]);
}
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",

View file

@@ -866,10 +866,13 @@ static int igt_reset_active_engine(void *arg)
}
struct active_engine {
-struct task_struct *task;
+struct kthread_worker *worker;
+struct kthread_work work;
struct intel_engine_cs *engine;
unsigned long resets;
unsigned int flags;
+bool stop;
+int result;
};
#define TEST_ACTIVE BIT(0)
@@ -900,10 +903,10 @@ static int active_request_put(struct i915_request *rq)
return err;
}
-static int active_engine(void *data)
+static void active_engine(struct kthread_work *work)
{
I915_RND_STATE(prng);
-struct active_engine *arg = data;
+struct active_engine *arg = container_of(work, typeof(*arg), work);
struct intel_engine_cs *engine = arg->engine;
struct i915_request *rq[8] = {};
struct intel_context *ce[ARRAY_SIZE(rq)];
@@ -913,16 +916,17 @@ static int active_engine(void *data)
for (count = 0; count < ARRAY_SIZE(ce); count++) {
ce[count] = intel_context_create(engine);
if (IS_ERR(ce[count])) {
-err = PTR_ERR(ce[count]);
-pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
+arg->result = PTR_ERR(ce[count]);
+pr_err("[%s] Create context #%ld failed: %d!\n",
+engine->name, count, arg->result);
while (--count)
intel_context_put(ce[count]);
-return err;
+return;
}
}
count = 0;
-while (!kthread_should_stop()) {
+while (!READ_ONCE(arg->stop)) {
unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
struct i915_request *old = rq[idx];
struct i915_request *new;
@@ -967,7 +971,7 @@ static int active_engine(void *data)
intel_context_put(ce[count]);
}
-return err;
+arg->result = err;
}
static int __igt_reset_engines(struct intel_gt *gt,
@@ -1022,7 +1026,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
for_each_engine(other, gt, tmp) {
-struct task_struct *tsk;
+struct kthread_worker *worker;
threads[tmp].resets =
i915_reset_engine_count(global, other);
@@ -1036,19 +1040,21 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].engine = other;
threads[tmp].flags = flags;
-tsk = kthread_run(active_engine, &threads[tmp],
-"igt/%s", other->name);
-if (IS_ERR(tsk)) {
-err = PTR_ERR(tsk);
-pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
+worker = kthread_create_worker(0, "igt/%s",
+other->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
+pr_err("[%s] Worker create failed: %d!\n",
+engine->name, err);
goto unwind;
}
-threads[tmp].task = tsk;
-get_task_struct(tsk);
-}
+threads[tmp].worker = worker;
-yield(); /* start all threads before we begin */
+kthread_init_work(&threads[tmp].work, active_engine);
+kthread_queue_work(threads[tmp].worker,
+&threads[tmp].work);
+}
st_engine_heartbeat_disable_no_pm(engine);
GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
@@ -1197,17 +1203,20 @@ unwind:
for_each_engine(other, gt, tmp) {
int ret;
-if (!threads[tmp].task)
+if (!threads[tmp].worker)
continue;
-ret = kthread_stop(threads[tmp].task);
+WRITE_ONCE(threads[tmp].stop, true);
+kthread_flush_work(&threads[tmp].work);
+ret = READ_ONCE(threads[tmp].result);
if (ret) {
pr_err("kthread for other engine %s failed, err=%d\n",
other->name, ret);
if (!err)
err = ret;
}
-put_task_struct(threads[tmp].task);
+kthread_destroy_worker(threads[tmp].worker);
/* GuC based resets are not logged per engine */
if (!using_guc) {

View file

@@ -299,9 +299,18 @@ __live_request_alloc(struct intel_context *ce)
return intel_context_create_request(ce);
}
-static int __igt_breadcrumbs_smoketest(void *arg)
+struct smoke_thread {
+struct kthread_worker *worker;
+struct kthread_work work;
+struct smoketest *t;
+bool stop;
+int result;
+};
+static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
{
-struct smoketest *t = arg;
+struct smoke_thread *thread = container_of(work, typeof(*thread), work);
+struct smoketest *t = thread->t;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
@@ -320,8 +329,10 @@ static int __igt_breadcrumbs_smoketest(void *arg)
*/
requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
-if (!requests)
-return -ENOMEM;
+if (!requests) {
+thread->result = -ENOMEM;
+return;
+}
order = i915_random_order(total, &prng);
if (!order) {
@@ -329,7 +340,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
goto out_requests;
}
-while (!kthread_should_stop()) {
+while (!READ_ONCE(thread->stop)) {
struct i915_sw_fence *submit, *wait;
unsigned int n, count;
@@ -437,7 +448,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
kfree(order);
out_requests:
kfree(requests);
-return err;
+thread->result = err;
}
static int mock_breadcrumbs_smoketest(void *arg)
@@ -450,7 +461,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
.request_alloc = __mock_request_alloc
};
unsigned int ncpus = num_online_cpus();
-struct task_struct **threads;
+struct smoke_thread *threads;
unsigned int n;
int ret = 0;
@@ -479,28 +490,37 @@ static int mock_breadcrumbs_smoketest(void *arg)
}
for (n = 0; n < ncpus; n++) {
-threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
-&t, "igt/%d", n);
-if (IS_ERR(threads[n])) {
-ret = PTR_ERR(threads[n]);
+struct kthread_worker *worker;
+worker = kthread_create_worker(0, "igt/%d", n);
+if (IS_ERR(worker)) {
+ret = PTR_ERR(worker);
ncpus = n;
break;
}
-get_task_struct(threads[n]);
+threads[n].worker = worker;
+threads[n].t = &t;
+threads[n].stop = false;
+threads[n].result = 0;
+kthread_init_work(&threads[n].work,
+__igt_breadcrumbs_smoketest);
+kthread_queue_work(worker, &threads[n].work);
}
-yield(); /* start all threads before we begin */
drm_msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
for (n = 0; n < ncpus; n++) {
int err;
-err = kthread_stop(threads[n]);
+WRITE_ONCE(threads[n].stop, true);
+kthread_flush_work(&threads[n].work);
+err = READ_ONCE(threads[n].result);
if (err < 0 && !ret)
ret = err;
-put_task_struct(threads[n]);
+kthread_destroy_worker(threads[n].worker);
}
pr_info("Completed %lu waits for %lu fence across %d cpus\n",
atomic_long_read(&t.num_waits),
@@ -1419,9 +1439,18 @@ out_free:
return err;
}
-static int __live_parallel_engine1(void *arg)
+struct parallel_thread {
+struct kthread_worker *worker;
+struct kthread_work work;
+struct intel_engine_cs *engine;
+int result;
+};
+static void __live_parallel_engine1(struct kthread_work *work)
{
-struct intel_engine_cs *engine = arg;
+struct parallel_thread *thread =
+container_of(work, typeof(*thread), work);
+struct intel_engine_cs *engine = thread->engine;
IGT_TIMEOUT(end_time);
unsigned long count;
int err = 0;
@@ -1452,12 +1481,14 @@ static int __live_parallel_engine1(void *arg)
intel_engine_pm_put(engine);
pr_info("%s: %lu request + sync\n", engine->name, count);
-return err;
+thread->result = err;
}
-static int __live_parallel_engineN(void *arg)
+static void __live_parallel_engineN(struct kthread_work *work)
{
-struct intel_engine_cs *engine = arg;
+struct parallel_thread *thread =
+container_of(work, typeof(*thread), work);
+struct intel_engine_cs *engine = thread->engine;
IGT_TIMEOUT(end_time);
unsigned long count;
int err = 0;
@@ -1479,7 +1510,7 @@ static int __live_parallel_engineN(void *arg)
intel_engine_pm_put(engine);
pr_info("%s: %lu requests\n", engine->name, count);
-return err;
+thread->result = err;
}
static bool wake_all(struct drm_i915_private *i915)
@@ -1505,9 +1536,11 @@ static int wait_for_all(struct drm_i915_private *i915)
return -ETIME;
}
-static int __live_parallel_spin(void *arg)
+static void __live_parallel_spin(struct kthread_work *work)
{
-struct intel_engine_cs *engine = arg;
+struct parallel_thread *thread =
+container_of(work, typeof(*thread), work);
+struct intel_engine_cs *engine = thread->engine;
struct igt_spinner spin;
struct i915_request *rq;
int err = 0;
@@ -1520,7 +1553,8 @@ static int __live_parallel_spin(void *arg)
if (igt_spinner_init(&spin, engine->gt)) {
wake_all(engine->i915);
-return -ENOMEM;
+thread->result = -ENOMEM;
+return;
}
intel_engine_pm_get(engine);
@@ -1553,22 +1587,22 @@ out_spin:
igt_spinner_fini(&spin);
-return err;
+thread->result = err;
}
static int live_parallel_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
-static int (* const func[])(void *arg) = {
+static void (* const func[])(struct kthread_work *) = {
__live_parallel_engine1,
__live_parallel_engineN,
__live_parallel_spin,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
+struct parallel_thread *threads;
struct intel_engine_cs *engine;
-int (* const *fn)(void *arg);
-struct task_struct **tsk;
+void (* const *fn)(struct kthread_work *);
int err = 0;
/*
@@ -1576,8 +1610,8 @@ static int live_parallel_engines(void *arg)
* tests that we load up the system maximally.
*/
-tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
-if (!tsk)
+threads = kcalloc(nengines, sizeof(*threads), GFP_KERNEL);
+if (!threads)
return -ENOMEM;
for (fn = func; !err && *fn; fn++) {
@@ -1594,37 +1628,44 @@ static int live_parallel_engines(void *arg)
idx = 0;
for_each_uabi_engine(engine, i915) {
-tsk[idx] = kthread_run(*fn, engine,
-"igt/parallel:%s",
-engine->name);
-if (IS_ERR(tsk[idx])) {
-err = PTR_ERR(tsk[idx]);
+struct kthread_worker *worker;
+worker = kthread_create_worker(0, "igt/parallel:%s",
+engine->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
break;
}
-get_task_struct(tsk[idx++]);
-}
-yield(); /* start all threads before we kthread_stop() */
+threads[idx].worker = worker;
+threads[idx].result = 0;
+threads[idx].engine = engine;
+kthread_init_work(&threads[idx].work, *fn);
+kthread_queue_work(worker, &threads[idx].work);
+idx++;
+}
idx = 0;
for_each_uabi_engine(engine, i915) {
int status;
-if (IS_ERR(tsk[idx]))
+if (!threads[idx].worker)
break;
-status = kthread_stop(tsk[idx]);
+kthread_flush_work(&threads[idx].work);
+status = READ_ONCE(threads[idx].result);
if (status && !err)
err = status;
-put_task_struct(tsk[idx++]);
+kthread_destroy_worker(threads[idx++].worker);
}
if (igt_live_test_end(&t))
err = -EIO;
}
-kfree(tsk);
+kfree(threads);
return err;
}
@@ -1672,7 +1713,7 @@ static int live_breadcrumbs_smoketest(void *arg)
const unsigned int ncpus = num_online_cpus();
unsigned long num_waits, num_fences;
struct intel_engine_cs *engine;
-struct task_struct **threads;
+struct smoke_thread *threads;
struct igt_live_test live;
intel_wakeref_t wakeref;
struct smoketest *smoke;
@@ -1746,23 +1787,26 @@ static int live_breadcrumbs_smoketest(void *arg)
smoke[idx].max_batch, engine->name);
for (n = 0; n < ncpus; n++) {
-struct task_struct *tsk;
+unsigned int i = idx * ncpus + n;
+struct kthread_worker *worker;
-tsk = kthread_run(__igt_breadcrumbs_smoketest,
-&smoke[idx], "igt/%d.%d", idx, n);
-if (IS_ERR(tsk)) {
-ret = PTR_ERR(tsk);
+worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+if (IS_ERR(worker)) {
+ret = PTR_ERR(worker);
goto out_flush;
}
-get_task_struct(tsk);
-threads[idx * ncpus + n] = tsk;
+threads[i].worker = worker;
+threads[i].t = &smoke[idx];
+kthread_init_work(&threads[i].work,
+__igt_breadcrumbs_smoketest);
+kthread_queue_work(worker, &threads[i].work);
}
idx++;
}
-yield(); /* start all threads before we begin */
drm_msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
out_flush:
@@ -1771,17 +1815,19 @@ out_flush:
num_fences = 0;
for_each_uabi_engine(engine, i915) {
for (n = 0; n < ncpus; n++) {
-struct task_struct *tsk = threads[idx * ncpus + n];
+unsigned int i = idx * ncpus + n;
int err;
-if (!tsk)
+if (!threads[i].worker)
continue;
-err = kthread_stop(tsk);
+WRITE_ONCE(threads[i].stop, true);
+kthread_flush_work(&threads[i].work);
+err = READ_ONCE(threads[i].result);
if (err < 0 && !ret)
ret = err;
-put_task_struct(tsk);
+kthread_destroy_worker(threads[i].worker);
}
num_waits += atomic_long_read(&smoke[idx].num_waits);
@@ -2891,9 +2937,18 @@ out:
return err;
}
-static int p_sync0(void *arg)
+struct p_thread {
+struct perf_stats p;
+struct kthread_worker *worker;
+struct kthread_work work;
+struct intel_engine_cs *engine;
+int result;
+};
+static void p_sync0(struct kthread_work *work)
{
-struct perf_stats *p = arg;
+struct p_thread *thread = container_of(work, typeof(*thread), work);
+struct perf_stats *p = &thread->p;
struct intel_engine_cs *engine = p->engine;
struct intel_context *ce;
IGT_TIMEOUT(end_time);
@@ -2902,13 +2957,16 @@ static int p_sync0(void *arg)
int err = 0;
ce = intel_context_create(engine);
-if (IS_ERR(ce))
-return PTR_ERR(ce);
+if (IS_ERR(ce)) {
+thread->result = PTR_ERR(ce);
+return;
+}
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
-return err;
+thread->result = err;
+return;
}
if (intel_engine_supports_stats(engine)) {
@@ -2958,12 +3016,13 @@ static int p_sync0(void *arg)
intel_context_unpin(ce);
intel_context_put(ce);
-return err;
+thread->result = err;
}
-static int p_sync1(void *arg)
+static void p_sync1(struct kthread_work *work)
{
-struct perf_stats *p = arg;
+struct p_thread *thread = container_of(work, typeof(*thread), work);
+struct perf_stats *p = &thread->p;
struct intel_engine_cs *engine = p->engine;
struct i915_request *prev = NULL;
struct intel_context *ce;
@@ -2973,13 +3032,16 @@ static int p_sync1(void *arg)
int err = 0;
ce = intel_context_create(engine);
-if (IS_ERR(ce))
-return PTR_ERR(ce);
+if (IS_ERR(ce)) {
+thread->result = PTR_ERR(ce);
+return;
+}
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
-return err;
+thread->result = err;
+return;
}
if (intel_engine_supports_stats(engine)) {
@@ -3031,12 +3093,13 @@ static int p_sync1(void *arg)
intel_context_unpin(ce);
intel_context_put(ce);
-return err;
+thread->result = err;
}
-static int p_many(void *arg)
+static void p_many(struct kthread_work *work)
{
-struct perf_stats *p = arg;
+struct p_thread *thread = container_of(work, typeof(*thread), work);
+struct perf_stats *p = &thread->p;
struct intel_engine_cs *engine = p->engine;
struct intel_context *ce;
IGT_TIMEOUT(end_time);
@@ -3045,13 +3108,16 @@ static int p_many(void *arg)
bool busy;
ce = intel_context_create(engine);
-if (IS_ERR(ce))
-return PTR_ERR(ce);
+if (IS_ERR(ce)) {
+thread->result = PTR_ERR(ce);
+return;
+}
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
-return err;
+thread->result = err;
+return;
}
if (intel_engine_supports_stats(engine)) {
@@ -3092,26 +3158,23 @@ static int p_many(void *arg)
intel_context_unpin(ce);
intel_context_put(ce);
-return err;
+thread->result = err;
}
static int perf_parallel_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
-static int (* const func[])(void *arg) = {
+static void (* const func[])(struct kthread_work *) = {
p_sync0,
p_sync1,
p_many,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
+void (* const *fn)(struct kthread_work *);
struct intel_engine_cs *engine;
-int (* const *fn)(void *arg);
struct pm_qos_request qos;
-struct {
-struct perf_stats p;
-struct task_struct *tsk;
-} *engines;
+struct p_thread *engines;
int err = 0;
engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
@@ -3134,36 +3197,45 @@ static int perf_parallel_engines(void *arg)
idx = 0;
for_each_uabi_engine(engine, i915) {
+struct kthread_worker *worker;
intel_engine_pm_get(engine);
-memset(&engines[idx].p, 0, sizeof(engines[idx].p));
-engines[idx].p.engine = engine;
-engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
-"igt:%s", engine->name);
-if (IS_ERR(engines[idx].tsk)) {
-err = PTR_ERR(engines[idx].tsk);
+worker = kthread_create_worker(0, "igt:%s",
+engine->name);
+if (IS_ERR(worker)) {
+err = PTR_ERR(worker);
intel_engine_pm_put(engine);
break;
}
-get_task_struct(engines[idx++].tsk);
-}
+engines[idx].worker = worker;
+engines[idx].result = 0;
+engines[idx].p.engine = engine;
+engines[idx].engine = engine;
-yield(); /* start all threads before we kthread_stop() */
+kthread_init_work(&engines[idx].work, *fn);
+kthread_queue_work(worker, &engines[idx].work);
+idx++;
+}
idx = 0;
for_each_uabi_engine(engine, i915) {
int status;
-if (IS_ERR(engines[idx].tsk))
+if (!engines[idx].worker)
break;
-status = kthread_stop(engines[idx].tsk);
+kthread_flush_work(&engines[idx].work);
+status = READ_ONCE(engines[idx].result);
if (status && !err)
err = status;
intel_engine_pm_put(engine);
-put_task_struct(engines[idx++].tsk);
+kthread_destroy_worker(engines[idx].worker);
+idx++;
}
if (igt_live_test_end(&t))