sync code with latest improvements from OpenBSD

purplerain 2023-11-11 01:29:48 +00:00
parent 5903cbe575
commit 62d64fa864
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
841 changed files with 83929 additions and 40755 deletions

View file

@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_NORMAL:
@ -55,6 +54,7 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return true;
default:
case AMDGPU_CTX_PRIORITY_UNSET:
return false;
}
}
@ -64,7 +64,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
return DRM_SCHED_PRIORITY_UNSET;
pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return DRM_SCHED_PRIORITY_MIN;
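Note: read together with the drm_sched.h hunk further down, these hunks make AMDGPU_CTX_PRIORITY_UNSET an invalid priority and drop DRM_SCHED_PRIORITY_UNSET from enum drm_sched_priority entirely. A condensed sketch of the resulting code (illustrative only, not part of the diff):

	/* amdgpu_ctx_priority_is_valid(): UNSET now falls through to false */
	default:
	case AMDGPU_CTX_PRIORITY_UNSET:
		return false;

	/* amdgpu_ctx_to_drm_sched_prio(): UNSET warns once and maps to NORMAL,
	 * since DRM_SCHED_PRIORITY_UNSET no longer exists in the scheduler enum */
	case AMDGPU_CTX_PRIORITY_UNSET:
		pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
		return DRM_SCHED_PRIORITY_NORMAL;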

View file

@ -2215,8 +2215,12 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) {
parent = pci_upstream_bridge(adev->pdev);
#ifdef notyet
parent = pcie_find_root_port(adev->pdev);
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
#else
adev->has_pr3 = false;
#endif
}
amdgpu_amdkfd_device_probe(adev);

View file

@ -410,7 +410,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
continue;
}
r = amdgpu_vm_clear_freed(adev, vm, NULL);
/* Reserve fences for two SDMA page table updates */
r = dma_resv_reserve_fences(resv, 2);
if (!r)
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (!r)
r = amdgpu_vm_handle_moved(adev, vm);
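Note: the diff markers were lost when this hunk was captured, so old and new lines run together; the first, bare amdgpu_vm_clear_freed() call appears to be the line being removed. After the change, amdgpu_dma_buf_move_notify() reserves two fence slots up front (for the two SDMA page-table updates) and only then touches the page tables. A minimal sketch of the resulting sequence, not part of the diff:

	/* Reserve fences for two SDMA page table updates */
	r = dma_resv_reserve_fences(resv, 2);
	if (!r)
		r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (!r)
		r = amdgpu_vm_handle_moved(adev, vm);
	/* error handling continues as in the surrounding function */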

View file

@ -221,7 +221,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);

View file

@ -1147,7 +1147,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
bool bL1SS = false;
bool bClkReqSupport = true;
if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
return;
if (adev->flags & AMD_IS_APU ||

View file

@ -2346,14 +2346,62 @@ static int dm_late_init(void *handle)
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
{
int ret;
u8 guid[16];
u64 tmp64;
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
goto out_fail;
if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN |
DP_UP_REQ_EN |
DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
if (ret != 16) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
if (memchr_inv(guid, 0, 16) == NULL) {
tmp64 = get_jiffies_64();
memcpy(&guid[0], &tmp64, sizeof(u64));
memcpy(&guid[8], &tmp64, sizeof(u64));
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
if (ret != 16) {
drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
goto out_fail;
}
}
memcpy(mgr->mst_primary->guid, guid, 16);
out_fail:
mutex_unlock(&mgr->lock);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@ -2375,18 +2423,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (!dp_is_lttpr_present(aconnector->dc_link))
dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
aconnector->dc_link);
need_hotplug = true;
}
/* TODO: move resume_mst_branch_status() into drm mst resume again
* once topology probing work is pulled out from mst resume into mst
* resume 2nd step. mst resume 2nd step should be called after old
* state getting restored (i.e. drm_atomic_helper_resume()).
*/
resume_mst_branch_status(mgr);
}
}
drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
@ -2775,7 +2820,8 @@ static int dm_resume(void *handle)
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
int i, r, j;
int i, r, j, ret;
bool need_hotplug = false;
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
@ -2873,7 +2919,7 @@ static int dm_resume(void *handle)
continue;
/*
* this is the case when traversing through already created
* this is the case when traversing through already created end sink
* MST connectors, should be skipped
*/
if (aconnector && aconnector->mst_port)
@ -2933,6 +2979,27 @@ static int dm_resume(void *handle)
dm->cached_state = NULL;
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
continue;
ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
aconnector->dc_link);
need_hotplug = true;
}
}
drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(ddev);
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
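Note: a sketch of the resulting MST resume flow, pieced together from the two amdgpu_dm.c hunks above (summary only, not part of the diff). s3_handle_mst() no longer resumes the topology manager itself; it only restores the branch device state, and the full topology probe moves to the end of dm_resume(), after the cached display state has been restored.

	/* dm_resume()
	 *   -> s3_handle_mst(ddev, false)
	 *        -> resume_mst_branch_status(mgr)
	 *             re-reads the DPCD caps, re-enables DP_MST_EN | DP_UP_REQ_EN |
	 *             DP_UPSTREAM_IS_SRC and re-writes the branch GUID, nothing more
	 *   -> ... restore cached display state ...
	 *   -> drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true)
	 *        -> on failure: dm_helpers_dp_mst_stop_top_mgr() and a deferred
	 *           drm_kms_helper_hotplug_event()
	 */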

View file

@ -1183,6 +1183,9 @@ static void disable_vbios_mode_if_required(
if (stream == NULL)
continue;
if (stream->apply_seamless_boot_optimization)
continue;
// only looking for first odm pipe
if (pipe->prev_odm_pipe)
continue;

View file

@ -1993,6 +1993,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
*states = ATTR_STATE_SUPPORTED;
break;
default:

View file

@ -2081,36 +2081,43 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
u32 smu_pcie_arg;
uint8_t *table_member1, *table_member2;
uint32_t min_gen_speed, max_gen_speed;
uint32_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
int ret, i;
/* PCIE gen speed and lane width override */
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
min_gen_speed = MAX(0, table_member1[0]);
max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
min_gen_speed = min_gen_speed > max_gen_speed ?
max_gen_speed : min_gen_speed;
min_lane_width = MAX(1, table_member2[0]);
max_lane_width = MIN(pcie_width_cap, table_member2[1]);
min_lane_width = min_lane_width > max_lane_width ?
max_lane_width : min_lane_width;
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
/* Force all levels to use the same settings */
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
}
pcie_table->pcie_gen[0] = max_gen_speed;
pcie_table->pcie_lane[0] = max_lane_width;
} else {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
pcie_table->pcie_gen[i] = pcie_gen_cap;
if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
}
pcie_table->pcie_gen[0] = min_gen_speed;
pcie_table->pcie_lane[0] = min_lane_width;
}
pcie_table->pcie_gen[1] = max_gen_speed;
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16 |

View file

@ -2584,14 +2584,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
if (!mstb)
return NULL;
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
if (!port->mstb)
continue;
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)

View file

@ -290,7 +290,8 @@ static int
update_connector_routing(struct drm_atomic_state *state,
struct drm_connector *connector,
struct drm_connector_state *old_connector_state,
struct drm_connector_state *new_connector_state)
struct drm_connector_state *new_connector_state,
bool added_by_user)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
* there's a chance the connector may have been destroyed during the
* process, but it's better to ignore that then cause
* drm_atomic_helper_resume() to fail.
*
* Last, we want to ignore connector registration when the connector
* was not pulled in the atomic state by user-space (ie, was pulled
* in by the driver, e.g. when updating a DP-MST stream).
*/
if (!state->duplicated && drm_connector_is_unregistered(connector) &&
crtc_state->active) {
added_by_user && crtc_state->active) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] is not registered\n",
connector->base.id, connector->name);
@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i, ret;
unsigned int connectors_mask = 0;
unsigned int connectors_mask = 0, user_connectors_mask = 0;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
user_connectors_mask |= BIT(i);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool has_connectors =
@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
*/
ret = update_connector_routing(state, connector,
old_connector_state,
new_connector_state);
new_connector_state,
BIT(i) & user_connectors_mask);
if (ret)
return ret;
if (old_connector_state->crtc) {
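Note, for context (illustrative sketch, not part of the diff): user_connectors_mask records which connectors were already in the atomic state when drm_atomic_helper_check_modeset() starts, i.e. the ones pulled in by user-space. Connectors that drivers add to the state later (for example a DP-MST stream pulled in by the driver) occupy higher state indices, so their BIT(i) is not in the mask and the "connector is not registered" rejection is skipped for them.

	unsigned int connectors_mask = 0, user_connectors_mask = 0;

	/* snapshot of the connectors user-space put in the state */
	for_each_oldnew_connector_in_state(state, connector,
					   old_connector_state,
					   new_connector_state, i)
		user_connectors_mask |= BIT(i);

	/* later, possibly after drivers pulled more connectors into the state: */
	ret = update_connector_routing(state, connector,
				       old_connector_state,
				       new_connector_state,
				       BIT(i) & user_connectors_mask);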

View file

@ -1,4 +1,4 @@
/* $OpenBSD: drm_linux.c,v 1.103 2023/08/04 09:36:28 jsg Exp $ */
/* $OpenBSD: drm_linux.c,v 1.104 2023/10/20 03:38:58 jsg Exp $ */
/*
* Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
* Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
@ -1722,6 +1722,18 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
return false;
}
ktime_t
dma_fence_timestamp(struct dma_fence *fence)
{
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
CPU_BUSY_CYCLE();
return fence->timestamp;
} else {
return ktime_get();
}
}
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

View file

@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_onemix2s = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "05/21/2018", "10/26/2018",
"03/04/2019", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.width = 1200,
.height = 1920,
@ -407,6 +415,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_onemix2s,
},
{}
};

View file

@ -270,6 +270,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
case -ENOBUFS: /* temporarily out of fences? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@ -548,6 +549,7 @@ static int i915_error_to_vmf_fault(int err)
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
case -ENOBUFS: /* temporarily out of fences? */
case -ERESTART:
case -EINTR:
case -EBUSY:

View file

@ -235,8 +235,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
u32 flags = 0;
u32 *cs;
/*
* L3 fabric flush is needed for AUX CCS invalidation
* which happens as part of pipe-control so we can
* ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3
* deals with Protected Memory which is not needed for
* AUX CCS invalidation and lead to unwanted side effects.
*/
if (mode & EMIT_FLUSH)
flags |= PIPE_CONTROL_FLUSH_L3;
flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
flags |= PIPE_CONTROL_FLUSH_L3;
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl,adl-p */

View file

@ -760,9 +760,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = &i915->pmu;
if (pmu->closed)
goto out;
if (flags & PERF_EF_UPDATE)
i915_pmu_event_read(event);
i915_pmu_disable(event);
out:
event->hw.state = PERF_HES_STOPPED;
}

View file

@ -55,8 +55,7 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
DRM_SCHED_PRIORITY_COUNT,
DRM_SCHED_PRIORITY_UNSET = -2
DRM_SCHED_PRIORITY_COUNT
};
/**

View file

@ -64,6 +64,7 @@ int dma_fence_signal_timestamp(struct dma_fence *, ktime_t);
int dma_fence_signal_timestamp_locked(struct dma_fence *, ktime_t);
bool dma_fence_is_signaled(struct dma_fence *);
bool dma_fence_is_signaled_locked(struct dma_fence *);
ktime_t dma_fence_timestamp(struct dma_fence *);
long dma_fence_default_wait(struct dma_fence *, bool, long);
long dma_fence_wait_any_timeout(struct dma_fence **, uint32_t, bool, long,
uint32_t *);

View file

@ -2122,12 +2122,11 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
/*
* On DCE32 any encoder can drive any block so usually just use crtc id,
* but Apple thinks different at least on iMac10,1 and iMac11,2, so there use linkb,
* but Apple thinks different at least on iMac10,1, so there use linkb,
* otherwise the internal eDP panel will stay dark.
*/
if (ASIC_IS_DCE32(rdev)) {
if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
enc_idx = (dig->linkb) ? 1 : 0;
else
enc_idx = radeon_crtc->crtc_id;

View file

@ -847,7 +847,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (next) {
next->s_fence->scheduled.timestamp =
job->s_fence->finished.timestamp;
dma_fence_timestamp(&job->s_fence->finished);
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}

View file

@ -235,10 +235,6 @@ void ttm_device_fini(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned i;
man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
ttm_resource_manager_set_used(man, false);
ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
@ -248,6 +244,10 @@ void ttm_device_fini(struct ttm_device *bdev)
if (ttm_bo_delayed_delete(bdev, true))
pr_debug("Delayed destroy list was clean\n");
man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
ttm_resource_manager_set_used(man, false);
ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
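Note: with the diff markers stripped, this hunk pair reads as if the same three lines occur twice; the first occurrence is the removal and the second the re-addition. The net effect is a reordering of ttm_device_fini(): the TTM_PL_SYSTEM manager is now taken down only after the delayed-destroy list has been flushed. Sketch of the resulting order (not part of the diff):

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	/* only now disable and detach the system placement manager */
	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	spin_lock(&bdev->lru_lock);
	/* ... LRU cleanup continues as before ... */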

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_dwqe_pci.c,v 1.1 2023/10/11 12:52:01 stsp Exp $ */
/* $OpenBSD: if_dwqe_pci.c,v 1.2 2023/10/31 05:46:36 jsg Exp $ */
/*
* Copyright (c) 2023 Stefan Sperling <stsp@openbsd.org>
@ -21,33 +21,19 @@
*/
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ic/dwqereg.h>

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_iwm.c,v 1.410 2023/09/02 09:02:18 stsp Exp $ */
/* $OpenBSD: if_iwm.c,v 1.412 2023/11/06 08:34:41 stsp Exp $ */
/*
* Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
@ -6166,7 +6166,7 @@ uint8_t
iwm_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
{
int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
int primary_idx = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
int primary_idx = ic->ic_bss->ni_primary_chan;
/*
* The FW is expected to check the control channel position only
* when in HT/VHT and the channel width is not 20MHz. Return
@ -9294,8 +9294,17 @@ iwm_set_rate_table_vht(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
if (i < 2 && in->in_phyctxt->vht_chan_width >=
IEEE80211_VHTOP0_CHAN_WIDTH_80)
tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
else
else if (in->in_phyctxt->sco ==
IEEE80211_HTOP0_SCO_SCA ||
in->in_phyctxt->sco ==
IEEE80211_HTOP0_SCO_SCB)
tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
else {
/* no 40 MHz, fall back on MCS 8 */
tab &= ~IWM_RATE_VHT_MCS_RATE_CODE_MSK;
tab |= 8;
}
tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
if (i < 4) {
if (ieee80211_ra_vht_use_sgi(ni))

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_ix.c,v 1.204 2023/08/21 21:45:18 bluhm Exp $ */
/* $OpenBSD: if_ix.c,v 1.205 2023/10/20 07:31:12 jan Exp $ */
/******************************************************************************
@ -38,8 +38,8 @@
#include <dev/pci/ixgbe_type.h>
/*
* Our TCP/IP Stack could not handle packets greater than MAXMCLBYTES.
* This interface could not handle packets greater than IXGBE_TSO_SIZE.
* Our TCP/IP Stack is unable to handle packets greater than MAXMCLBYTES.
* This interface is unable to handle packets greater than IXGBE_TSO_SIZE.
*/
CTASSERT(MAXMCLBYTES <= IXGBE_TSO_SIZE);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_ixl.c,v 1.89 2023/09/29 19:44:47 bluhm Exp $ */
/* $OpenBSD: if_ixl.c,v 1.92 2023/10/20 13:21:15 jan Exp $ */
/*
* Copyright (c) 2013-2015, Intel Corporation
@ -71,6 +71,7 @@
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net/toeplitz.h>
#if NBPFILTER > 0
@ -85,6 +86,8 @@
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/if_ether.h>
@ -827,6 +830,10 @@ struct ixl_tx_desc {
#define IXL_TX_DESC_BSIZE_MASK \
(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
#define IXL_TX_CTX_DESC_CMD_TSO 0x10
#define IXL_TX_CTX_DESC_TLEN_SHIFT 30
#define IXL_TX_CTX_DESC_MSS_SHIFT 50
#define IXL_TX_DESC_L2TAG1_SHIFT 48
} __packed __aligned(16);
@ -893,11 +900,19 @@ struct ixl_rx_wb_desc_32 {
uint64_t qword3;
} __packed __aligned(16);
#define IXL_TX_PKT_DESCS 8
#define IXL_TX_PKT_DESCS 32
#define IXL_TX_QUEUE_ALIGN 128
#define IXL_RX_QUEUE_ALIGN 128
#define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
#define IXL_TSO_SIZE ((255 * 1024) - 1)
#define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
/*
* Our TCP/IP Stack is unable to handle packets greater than MAXMCLBYTES.
* This interface is unable to handle packets greater than IXL_TSO_SIZE.
*/
CTASSERT(MAXMCLBYTES < IXL_TSO_SIZE);
#define IXL_PCIREG PCI_MAPREG_START
@ -1958,6 +1973,7 @@ ixl_attach(struct device *parent, struct device *self, void *aux)
ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
@ -2603,7 +2619,7 @@ ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
txm = &maps[i];
if (bus_dmamap_create(sc->sc_dmat,
IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
MAXMCLBYTES, IXL_TX_PKT_DESCS, IXL_MAX_DMA_SEG_SIZE, 0,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
&txm->txm_map) != 0)
goto uncreate;
@ -2787,7 +2803,8 @@ ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
}
static uint64_t
ixl_tx_setup_offload(struct mbuf *m0)
ixl_tx_setup_offload(struct mbuf *m0, struct ixl_tx_ring *txr,
unsigned int prod)
{
struct ether_extracted ext;
uint64_t hlen;
@ -2800,7 +2817,7 @@ ixl_tx_setup_offload(struct mbuf *m0)
}
if (!ISSET(m0->m_pkthdr.csum_flags,
M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT|M_TCP_TSO))
return (offload);
ether_extract_headers(m0, &ext);
@ -2833,6 +2850,33 @@ ixl_tx_setup_offload(struct mbuf *m0)
offload |= (sizeof(*ext.udp) >> 2) << IXL_TX_DESC_L4LEN_SHIFT;
}
if (ISSET(m0->m_pkthdr.csum_flags, M_TCP_TSO)) {
if (ext.tcp) {
struct ixl_tx_desc *ring, *txd;
uint64_t cmd = 0, paylen, outlen;
hlen += ext.tcp->th_off << 2;
outlen = m0->m_pkthdr.ph_mss;
paylen = m0->m_pkthdr.len - ETHER_HDR_LEN - hlen;
ring = IXL_DMA_KVA(&txr->txr_mem);
txd = &ring[prod];
cmd |= IXL_TX_DESC_DTYPE_CONTEXT;
cmd |= IXL_TX_CTX_DESC_CMD_TSO;
cmd |= paylen << IXL_TX_CTX_DESC_TLEN_SHIFT;
cmd |= outlen << IXL_TX_CTX_DESC_MSS_SHIFT;
htolem64(&txd->addr, 0);
htolem64(&txd->cmd, cmd);
tcpstat_add(tcps_outpkttso,
(paylen + outlen - 1) / outlen);
} else
tcpstat_inc(tcps_outbadtso);
}
return (offload);
}
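Note: a worked example of the new TSO context descriptor, with assumed values (illustrative only, not part of the diff): a 65486-byte packet carrying a 14-byte Ethernet header, a 20-byte IP header, a 20-byte TCP header and ph_mss = 1448.

	/* hlen   = 20 + 20 = 40 (IP + TCP; the Ethernet header is counted separately)
	 * paylen = 65486 - ETHER_HDR_LEN - hlen = 65486 - 14 - 40 = 65432
	 * outlen = 1448 (ph_mss)
	 *
	 * cmd |= IXL_TX_DESC_DTYPE_CONTEXT | IXL_TX_CTX_DESC_CMD_TSO;
	 * cmd |= 65432ULL << IXL_TX_CTX_DESC_TLEN_SHIFT;   // TCP payload length at bit 30
	 * cmd |= 1448ULL  << IXL_TX_CTX_DESC_MSS_SHIFT;    // MSS at bit 50
	 *
	 * tcps_outpkttso += (65432 + 1448 - 1) / 1448 = 46 segments
	 */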
@ -2873,7 +2917,8 @@ ixl_start(struct ifqueue *ifq)
mask = sc->sc_tx_ring_ndescs - 1;
for (;;) {
if (free <= IXL_TX_PKT_DESCS) {
/* We need one extra descriptor for TSO packets. */
if (free <= (IXL_TX_PKT_DESCS + 1)) {
ifq_set_oactive(ifq);
break;
}
@ -2882,11 +2927,17 @@ ixl_start(struct ifqueue *ifq)
if (m == NULL)
break;
offload = ixl_tx_setup_offload(m);
offload = ixl_tx_setup_offload(m, txr, prod);
txm = &txr->txr_maps[prod];
map = txm->txm_map;
if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
prod++;
prod &= mask;
free--;
}
if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
ifq->ifq_errors++;
m_freem(m);