sync with OpenBSD -current

purplerain 2024-05-22 02:43:18 +00:00
parent b5356a44af
commit 12fde4069b
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
187 changed files with 1127 additions and 1365 deletions


@ -300,12 +300,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
ring->name);
} else {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
dev_err(adev->dev,
"Error scheduling IBs (%d) in ring(%s)", r,
ring->name);
}
job->job_run_counter++;


@ -1269,14 +1269,18 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
* amdgpu_bo_move_notify - notification about a memory move
* @bo: pointer to a buffer object
* @evict: if this move is evicting the buffer from the graphics address space
* @new_mem: new resource for backing the BO
*
* Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
* bookkeeping.
* TTM driver callback which is called when ttm moves a buffer.
*/
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
if (!amdgpu_bo_is_amdgpu_bo(bo))
@ -1289,13 +1293,13 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
#ifdef notyet
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
bo->resource->mem_type != TTM_PL_SYSTEM)
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
#endif
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
/* move_notify is called before move happens */
trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
old_mem ? old_mem->mem_type : -1);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,


@ -329,7 +329,9 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,


@ -424,7 +424,7 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
res->mem_type == AMDGPU_PL_PREEMPT)
res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
return true;
if (res->mem_type != TTM_PL_VRAM)
@ -432,7 +432,7 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
return false;
amdgpu_res_next(&cursor, cursor.size);
}
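The `>=` to `>` change above is an off-by-one fix: a chunk whose last byte ends exactly at visible_vram_size still lies entirely inside the CPU-visible window, so only ranges extending strictly past the boundary should fail. A standalone sketch of the corrected predicate, with illustrative numbers in the comment:

#include <stdbool.h>
#include <stdint.h>

/*
 * A range [start, start + size) is CPU-visible iff it ends at or
 * before the boundary. With visible = 256 MiB, a chunk spanning
 * [255 MiB, 256 MiB) ends exactly at the boundary and must pass;
 * the old ">=" comparison wrongly rejected it.
 */
static bool range_cpu_visible(uint64_t start, uint64_t size, uint64_t visible)
{
	return start + size <= visible;
}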
@ -486,14 +486,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
bo->ttm == NULL)) {
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
goto out;
return 0;
}
if (old_mem->mem_type == TTM_PL_SYSTEM &&
(new_mem->mem_type == TTM_PL_TT ||
new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
goto out;
return 0;
}
if ((old_mem->mem_type == TTM_PL_TT ||
old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
@ -503,9 +505,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
return 0;
}
if (old_mem->mem_type == AMDGPU_PL_GDS ||
@ -517,8 +520,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == AMDGPU_PL_OA ||
new_mem->mem_type == AMDGPU_PL_DOORBELL) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
goto out;
return 0;
}
if (bo->type == ttm_bo_type_device &&
@ -530,23 +534,24 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
if (adev->mman.buffer_funcs_enabled) {
if (((old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) ||
(old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM))) {
hop->fpfn = 0;
hop->lpfn = 0;
hop->mem_type = TTM_PL_TT;
hop->flags = TTM_PL_FLAG_TEMPORARY;
return -EMULTIHOP;
}
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
} else {
r = -ENODEV;
if (adev->mman.buffer_funcs_enabled &&
((old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) ||
(old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM))) {
hop->fpfn = 0;
hop->lpfn = 0;
hop->mem_type = TTM_PL_TT;
hop->flags = TTM_PL_FLAG_TEMPORARY;
return -EMULTIHOP;
}
amdgpu_bo_move_notify(bo, evict, new_mem);
if (adev->mman.buffer_funcs_enabled)
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
else
r = -ENODEV;
if (r) {
/* Check that all memory is CPU accessible */
if (!amdgpu_res_copyable(adev, old_mem) ||
@ -560,11 +565,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
out:
/* update statistics */
/* update statistics after the move */
if (evict)
atomic64_inc(&adev->num_evictions);
atomic64_add(bo->base.size, &adev->num_bytes_moved);
amdgpu_bo_move_notify(bo, evict);
return 0;
}
@ -1605,7 +1609,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
amdgpu_bo_move_notify(bo, false);
amdgpu_bo_move_notify(bo, false, NULL);
}
static struct ttm_device_funcs amdgpu_bo_driver = {


@ -61,6 +61,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_idx, struct amdgpu_ring *ring)
{
@ -86,7 +91,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
if (aqua_vanjaram_xcp_vcn_shared(adev))
inst_mask = 1 << (inst_idx * 2);
break;
default:
@ -139,10 +144,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
/* VCN is shared by two partitions under CPX MODE */
/* VCN may be shared by two partitions under CPX MODE in certain
* configs.
*/
if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
aqua_vanjaram_xcp_vcn_shared(adev))
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
}


@ -1613,19 +1613,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
DRAM_ECC_INT_ENABLE, 0);
WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
break;
/* sdma ecc interrupt is enabled by default
* driver doesn't need to do anything to
* enable the interrupt */
case AMDGPU_IRQ_STATE_ENABLE:
default:
break;
}
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
return 0;
}


@ -1138,7 +1138,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
offset = dev->adev->rmmio_remap.bus_addr;
if (!offset) {
if (!offset || (PAGE_SIZE > 4096)) {
err = -ENOMEM;
goto err_unlock;
}
@ -1516,7 +1516,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
/* Find a KFD GPU device that supports the get_dmabuf_info query */
for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
if (dev)
if (dev && !kfd_devcgroup_check_permission(dev))
break;
if (!dev)
return -EINVAL;
@ -1538,7 +1538,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
if (xcp_id >= 0)
args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
else
args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
args->gpu_id = dev->id;
args->flags = flags;
/* Copy metadata buffer to user mode */
@ -2307,7 +2307,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
return -EINVAL;
}
offset = pdd->dev->adev->rmmio_remap.bus_addr;
if (!offset) {
if (!offset || (PAGE_SIZE > 4096)) {
pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
return -ENOMEM;
}
@ -3348,6 +3348,9 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
if (PAGE_SIZE > 4096)
return -EINVAL;
address = dev->adev->rmmio_remap.bus_addr;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
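The three PAGE_SIZE > 4096 guards added above share one rationale: the remapped MMIO region handed to userspace is a single 4 KiB hardware page, and mmap granularity is PAGE_SIZE, so on a kernel built with larger base pages the smallest possible mapping would also expose adjacent, unrelated registers. A sketch of the same test as a self-documenting helper (hypothetical name):

/*
 * Hypothetical helper capturing the added checks: the rmmio_remap
 * page is exactly 4 KiB, so it may only be mapped to userspace when
 * one base page covers no more than that.
 */
static inline bool kfd_mmio_remap_mappable(void)
{
	return PAGE_SIZE <= 4096;
}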


@ -935,7 +935,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
int count;
if (!kfd->init_complete)
return;
@ -943,12 +942,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = ++kfd_locked;
mutex_unlock(&kfd_processes_mutex);
/* For first KFD device suspend all the KFD processes */
if (count == 1)
if (++kfd_locked == 1)
kfd_suspend_all_processes();
mutex_unlock(&kfd_processes_mutex);
}
for (i = 0; i < kfd->num_nodes; i++) {
@ -959,7 +956,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count, i;
int ret, i;
if (!kfd->init_complete)
return 0;
@ -973,12 +970,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = --kfd_locked;
mutex_unlock(&kfd_processes_mutex);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
if (--kfd_locked == 0)
ret = kfd_resume_all_processes();
WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
mutex_unlock(&kfd_processes_mutex);
}
return ret;
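Both hunks above make the same fix: the counter test and the action it triggers now happen while kfd_processes_mutex is held, so a concurrent suspend or resume can no longer slip between the increment/decrement and the call that suspends or resumes all processes. A minimal sketch of the corrected shape, with generic names:

/*
 * Sketch of the lock-scope fix (generic names): act on the counter's
 * new value before dropping the mutex, so the first/last transition
 * and its side effect are serialized with concurrent callers.
 */
mutex_lock(&lock);
if (++locked == 1)	/* first suspender acts while still serialized */
	suspend_all();
mutex_unlock(&lock);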


@ -336,7 +336,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),


@ -325,7 +325,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
/* CP */
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),


@ -385,7 +385,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
}
kfd_signal_event_interrupt(pasid, sq_int_data, 24);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),


@ -2978,6 +2978,10 @@ static int dm_resume(void *handle)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@ -5760,6 +5764,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
return NULL;
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@ -8451,6 +8458,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
continue;
notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);


@ -1465,7 +1465,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -1566,7 +1568,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -1651,7 +1655,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -1750,7 +1756,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -1835,7 +1843,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -1934,7 +1944,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -2015,7 +2027,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -2111,7 +2125,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -2190,7 +2206,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -2246,7 +2264,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -2317,7 +2337,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@ -2388,7 +2410,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
pipe_ctx->stream->link == aconnector->dc_link &&
pipe_ctx->stream->sink &&
pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}


@ -2961,6 +2961,7 @@ static enum bp_result construct_integrated_info(
result = get_integrated_info_v2_1(bp, info);
break;
case 2:
case 3:
result = get_integrated_info_v2_2(bp, info);
break;
default:


@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
x),
25));
// If y rounds up to integer, carry it over to x.
if (y >> 25) {
x += 1;
y = 0;
}
switch (stream_encoder_inst) {
case 0:
REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,

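The added block completes the fixed-point rounding just above it: x carries the integer number of time slots and y a 25-bit fraction, so when rounding pushes y up to 1.0 (making y >> 25 nonzero) the result is really x + 1 with a zero fraction. A standalone sketch of that carry, assuming the same 25-bit split:

#include <stdint.h>

/*
 * Normalize an x + y/2^25 pair after rounding: a fraction that
 * reached 1 << 25 overflows into the integer part.
 */
static void normalize_x_dot_y(uint32_t *x, uint32_t *y)
{
	if (*y >> 25) {
		*x += 1;
		*y = 0;
	}
}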

@ -226,7 +226,7 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (!en && !adev->in_s0ix) {
if (!en && adev->in_s4) {
/* Adds a GFX reset as workaround just before sending the
* MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
* an invalid state.


@ -2933,7 +2933,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
dev->mode_config.max_width,
dev->mode_config.max_height);
else
drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe",
drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe\n",
connector->base.id, connector->name);
}


@ -75,19 +75,6 @@ struct intel_audio_funcs {
struct intel_crtc_state *crtc_state);
};
/* DP N/M table */
#define LC_810M 810000
#define LC_540M 540000
#define LC_270M 270000
#define LC_162M 162000
struct dp_aud_n_m {
int sample_rate;
int clock;
u16 m;
u16 n;
};
struct hdmi_aud_ncts {
int sample_rate;
int clock;
@ -95,60 +82,6 @@ struct hdmi_aud_ncts {
int cts;
};
/* Values according to DP 1.4 Table 2-104 */
static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 32000, LC_162M, 1024, 10125 },
{ 44100, LC_162M, 784, 5625 },
{ 48000, LC_162M, 512, 3375 },
{ 64000, LC_162M, 2048, 10125 },
{ 88200, LC_162M, 1568, 5625 },
{ 96000, LC_162M, 1024, 3375 },
{ 128000, LC_162M, 4096, 10125 },
{ 176400, LC_162M, 3136, 5625 },
{ 192000, LC_162M, 2048, 3375 },
{ 32000, LC_270M, 1024, 16875 },
{ 44100, LC_270M, 784, 9375 },
{ 48000, LC_270M, 512, 5625 },
{ 64000, LC_270M, 2048, 16875 },
{ 88200, LC_270M, 1568, 9375 },
{ 96000, LC_270M, 1024, 5625 },
{ 128000, LC_270M, 4096, 16875 },
{ 176400, LC_270M, 3136, 9375 },
{ 192000, LC_270M, 2048, 5625 },
{ 32000, LC_540M, 1024, 33750 },
{ 44100, LC_540M, 784, 18750 },
{ 48000, LC_540M, 512, 11250 },
{ 64000, LC_540M, 2048, 33750 },
{ 88200, LC_540M, 1568, 18750 },
{ 96000, LC_540M, 1024, 11250 },
{ 128000, LC_540M, 4096, 33750 },
{ 176400, LC_540M, 3136, 18750 },
{ 192000, LC_540M, 2048, 11250 },
{ 32000, LC_810M, 1024, 50625 },
{ 44100, LC_810M, 784, 28125 },
{ 48000, LC_810M, 512, 16875 },
{ 64000, LC_810M, 2048, 50625 },
{ 88200, LC_810M, 1568, 28125 },
{ 96000, LC_810M, 1024, 16875 },
{ 128000, LC_810M, 4096, 50625 },
{ 176400, LC_810M, 3136, 28125 },
{ 192000, LC_810M, 2048, 16875 },
};
static const struct dp_aud_n_m *
audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
if (rate == dp_aud_n_m[i].sample_rate &&
crtc_state->port_clock == dp_aud_n_m[i].clock)
return &dp_aud_n_m[i];
}
return NULL;
}
static const struct {
int clock;
u32 config;
@ -386,47 +319,17 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = i915->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
const struct dp_aud_n_m *nm;
int rate;
u32 tmp;
rate = acomp ? acomp->aud_sample_rate[port] : 0;
nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m,
nm->n);
else
drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n");
/* Enable time stamps. Let HW calculate Maud/Naud values */
intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
AUD_CONFIG_UPPER_N_MASK |
AUD_CONFIG_LOWER_N_MASK |
AUD_CONFIG_N_PROG_ENABLE,
AUD_CONFIG_N_VALUE_INDEX);
tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= AUD_CONFIG_N_VALUE_INDEX;
if (nm) {
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(nm->n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
if (nm) {
tmp |= nm->m;
tmp |= AUD_M_CTS_M_VALUE_INDEX;
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void


@ -1035,22 +1035,11 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
panel->vbt.backlight.controller = 0;
if (i915->display.vbt.version >= 191) {
size_t exp_size;
const struct lfp_backlight_control_method *method;
if (i915->display.vbt.version >= 236)
exp_size = sizeof(struct bdb_lfp_backlight_data);
else if (i915->display.vbt.version >= 234)
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
else
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
if (get_blocksize(backlight_data) >= exp_size) {
const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
panel->vbt.backlight.type = method->type;
panel->vbt.backlight.controller = method->controller;
}
method = &backlight_data->backlight_control[panel_type];
panel->vbt.backlight.type = method->type;
panel->vbt.backlight.controller = method->controller;
}
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;


@ -897,11 +897,6 @@ struct lfp_brightness_level {
u16 reserved;
} __packed;
#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
offsetof(struct bdb_lfp_backlight_data, brightness_level)
#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];


@ -8,14 +8,14 @@
#include "intel_gt_ccs_mode.h"
#include "intel_gt_regs.h"
void intel_gt_apply_ccs_mode(struct intel_gt *gt)
unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
{
int cslice;
u32 mode = 0;
int first_ccs = __ffs(CCS_MASK(gt));
if (!IS_DG2(gt->i915))
return;
return 0;
/* Build the value for the fixed CCS load balancing */
for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
@ -35,5 +35,5 @@ void intel_gt_apply_ccs_mode(struct intel_gt *gt)
XEHP_CCS_MODE_CSLICE_MASK);
}
intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode);
return mode;
}


@ -8,6 +8,6 @@
struct intel_gt;
void intel_gt_apply_ccs_mode(struct intel_gt *gt);
unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt);
#endif /* __INTEL_GT_CCS_MODE_H__ */


@ -2828,6 +2828,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct intel_gt *gt = engine->gt;
u32 mode;
if (!IS_DG2(gt->i915))
return;
@ -2844,7 +2845,8 @@ static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_li
* After having disabled automatic load balancing we need to
* assign all slices to a single CCS. We will call it CCS mode 1
*/
intel_gt_apply_ccs_mode(gt);
mode = intel_gt_apply_ccs_mode(gt);
wa_masked_en(wal, XEHP_CCS_MODE, mode);
}
/*


@ -24,6 +24,7 @@
#define noinline __attribute__((__noinline__))
#define noinline_for_stack __attribute__((__noinline__))
#define fallthrough do {} while (0)
#define __counted_by(x)
#define __PASTE(x,y) __CONCAT(x,y)
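The empty __counted_by(x) stub added to this compatibility header is what lets the annotated Linux structures later in this commit (the pplib tables below) compile with toolchains that lack the attribute. A sketch of how the full definition degrades, modeled on Linux's compiler_attributes.h:

/*
 * Sketch: expand to the bounds-checking attribute when the compiler
 * supports it, otherwise to nothing, leaving a plain C99 flexible
 * array member.
 */
#ifdef __has_attribute
# if __has_attribute(__counted_by__)
#  define __counted_by(member)	__attribute__((__counted_by__(member)))
# endif
#endif
#ifndef __counted_by
# define __counted_by(member)
#endif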


@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
//size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
//size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;
//a index to the array of nonClockInfos
@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
UCHAR clockInfoIndex[1];
UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
ATOM_PPLIB_STATE_V2 states[1];
ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;
@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
UCHAR clockInfo[1];
UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record

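The [1] to [] __counted_by(...) conversions above turn old-style one-element trailing arrays into C99 flexible array members whose length is tied to the named count field, matching the struct_size() formula the updated comment cites. A minimal sketch of the pattern with a matching allocation (hypothetical struct and count):

/*
 * Hypothetical struct following the same pattern. struct_size()
 * computes sizeof(*d) + n * sizeof(d->level[0]) with overflow
 * checking; the count field must be kept in sync with the array.
 */
struct demo_levels {
	unsigned char ucNumLevels;
	unsigned char level[] __counted_by(ucNumLevels);
};

struct demo_levels *d = kzalloc(struct_size(d, level, n), GFP_KERNEL);
if (d)
	d->ucNumLevels = n;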

@ -92,7 +92,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
*/
if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
page_flags |= TTM_TT_FLAG_DECRYPTED;
drm_info(ddev, "TT memory decryption enabled.");
drm_info_once(ddev, "TT memory decryption enabled.");
}
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);


@ -1,4 +1,4 @@
/* $OpenBSD: if_igc.c,v 1.23 2024/05/07 18:35:23 jan Exp $ */
/* $OpenBSD: if_igc.c,v 1.24 2024/05/21 11:19:39 bluhm Exp $ */
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@ -107,21 +107,21 @@ void igc_setup_interface(struct igc_softc *);
void igc_init(void *);
void igc_start(struct ifqueue *);
int igc_txeof(struct tx_ring *);
int igc_txeof(struct igc_txring *);
void igc_stop(struct igc_softc *);
int igc_ioctl(struct ifnet *, u_long, caddr_t);
int igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *);
int igc_rxfill(struct rx_ring *);
int igc_rxfill(struct igc_rxring *);
void igc_rxrefill(void *);
int igc_rxeof(struct rx_ring *);
int igc_rxeof(struct igc_rxring *);
void igc_rx_checksum(uint32_t, struct mbuf *, uint32_t);
void igc_watchdog(struct ifnet *);
void igc_media_status(struct ifnet *, struct ifmediareq *);
int igc_media_change(struct ifnet *);
void igc_iff(struct igc_softc *);
void igc_update_link_status(struct igc_softc *);
int igc_get_buf(struct rx_ring *, int);
int igc_tx_ctx_setup(struct tx_ring *, struct mbuf *, int, uint32_t *,
int igc_get_buf(struct igc_rxring *, int);
int igc_tx_ctx_setup(struct igc_txring *, struct mbuf *, int, uint32_t *,
uint32_t *);
void igc_configure_queues(struct igc_softc *);
@ -132,18 +132,18 @@ void igc_disable_intr(struct igc_softc *);
int igc_intr_link(void *);
int igc_intr_queue(void *);
int igc_allocate_transmit_buffers(struct tx_ring *);
int igc_allocate_transmit_buffers(struct igc_txring *);
int igc_setup_transmit_structures(struct igc_softc *);
int igc_setup_transmit_ring(struct tx_ring *);
int igc_setup_transmit_ring(struct igc_txring *);
void igc_initialize_transmit_unit(struct igc_softc *);
void igc_free_transmit_structures(struct igc_softc *);
void igc_free_transmit_buffers(struct tx_ring *);
int igc_allocate_receive_buffers(struct rx_ring *);
void igc_free_transmit_buffers(struct igc_txring *);
int igc_allocate_receive_buffers(struct igc_rxring *);
int igc_setup_receive_structures(struct igc_softc *);
int igc_setup_receive_ring(struct rx_ring *);
int igc_setup_receive_ring(struct igc_rxring *);
void igc_initialize_receive_unit(struct igc_softc *);
void igc_free_receive_structures(struct igc_softc *);
void igc_free_receive_buffers(struct rx_ring *);
void igc_free_receive_buffers(struct igc_rxring *);
void igc_initialize_rss_mapping(struct igc_softc *);
void igc_get_hw_control(struct igc_softc *);
@ -374,8 +374,8 @@ int
igc_allocate_queues(struct igc_softc *sc)
{
struct igc_queue *iq;
struct tx_ring *txr;
struct rx_ring *rxr;
struct igc_txring *txr;
struct igc_rxring *rxr;
int i, rsize, rxconf, tsize, txconf;
/* Allocate the top level queue structs. */
@ -387,7 +387,7 @@ igc_allocate_queues(struct igc_softc *sc)
}
/* Allocate the TX ring. */
sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct igc_txring),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->tx_rings == NULL) {
printf("%s: unable to allocate TX ring\n", DEVNAME(sc));
@ -395,7 +395,7 @@ igc_allocate_queues(struct igc_softc *sc)
}
/* Allocate the RX ring. */
sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct igc_rxring),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->rx_rings == NULL) {
printf("%s: unable to allocate RX ring\n", DEVNAME(sc));
@ -456,10 +456,12 @@ err_rx_desc:
err_tx_desc:
for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
igc_dma_free(sc, &txr->txdma);
free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring));
free(sc->rx_rings, M_DEVBUF,
sc->sc_nqueues * sizeof(struct igc_rxring));
sc->rx_rings = NULL;
rx_fail:
free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
free(sc->tx_rings, M_DEVBUF,
sc->sc_nqueues * sizeof(struct igc_txring));
sc->tx_rings = NULL;
fail:
return ENOMEM;
@ -833,8 +835,8 @@ igc_setup_interface(struct igc_softc *sc)
for (i = 0; i < sc->sc_nqueues; i++) {
struct ifqueue *ifq = ifp->if_ifqs[i];
struct ifiqueue *ifiq = ifp->if_iqs[i];
struct tx_ring *txr = &sc->tx_rings[i];
struct rx_ring *rxr = &sc->rx_rings[i];
struct igc_txring *txr = &sc->tx_rings[i];
struct igc_rxring *rxr = &sc->rx_rings[i];
ifq->ifq_softc = txr;
txr->ifq = ifq;
@ -849,7 +851,7 @@ igc_init(void *arg)
{
struct igc_softc *sc = (struct igc_softc *)arg;
struct ifnet *ifp = &sc->sc_ac.ac_if;
struct rx_ring *rxr;
struct igc_rxring *rxr;
uint32_t ctrl = 0;
int i, s;
@ -959,7 +961,7 @@ igc_start(struct ifqueue *ifq)
{
struct ifnet *ifp = ifq->ifq_if;
struct igc_softc *sc = ifp->if_softc;
struct tx_ring *txr = ifq->ifq_softc;
struct igc_txring *txr = ifq->ifq_softc;
union igc_adv_tx_desc *txdesc;
struct igc_tx_buf *txbuf;
bus_dmamap_t map;
@ -1067,7 +1069,7 @@ igc_start(struct ifqueue *ifq)
}
int
igc_txeof(struct tx_ring *txr)
igc_txeof(struct igc_txring *txr)
{
struct igc_softc *sc = txr->sc;
struct ifqueue *ifq = txr->ifq;
@ -1223,7 +1225,7 @@ int
igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
{
struct if_rxring_info *ifr;
struct rx_ring *rxr;
struct igc_rxring *rxr;
int error, i, n = 0;
ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
@ -1244,7 +1246,7 @@ igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
}
int
igc_rxfill(struct rx_ring *rxr)
igc_rxfill(struct igc_rxring *rxr)
{
struct igc_softc *sc = rxr->sc;
int i, post = 0;
@ -1277,7 +1279,7 @@ igc_rxfill(struct rx_ring *rxr)
void
igc_rxrefill(void *xrxr)
{
struct rx_ring *rxr = xrxr;
struct igc_rxring *rxr = xrxr;
struct igc_softc *sc = rxr->sc;
if (igc_rxfill(rxr)) {
@ -1296,7 +1298,7 @@ igc_rxrefill(void *xrxr)
*
*********************************************************************/
int
igc_rxeof(struct rx_ring *rxr)
igc_rxeof(struct igc_rxring *rxr)
{
struct igc_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->sc_ac.ac_if;
@ -1657,7 +1659,7 @@ igc_update_link_status(struct igc_softc *sc)
*
**********************************************************************/
int
igc_get_buf(struct rx_ring *rxr, int i)
igc_get_buf(struct igc_rxring *rxr, int i)
{
struct igc_softc *sc = rxr->sc;
struct igc_rx_buf *rxbuf;
@ -1812,8 +1814,8 @@ igc_intr_queue(void *arg)
struct igc_queue *iq = arg;
struct igc_softc *sc = iq->sc;
struct ifnet *ifp = &sc->sc_ac.ac_if;
struct rx_ring *rxr = iq->rxr;
struct tx_ring *txr = iq->txr;
struct igc_rxring *rxr = iq->rxr;
struct igc_txring *txr = iq->txr;
if (ifp->if_flags & IFF_RUNNING) {
igc_txeof(txr);
@ -1833,7 +1835,7 @@ igc_intr_queue(void *arg)
*
**********************************************************************/
int
igc_allocate_transmit_buffers(struct tx_ring *txr)
igc_allocate_transmit_buffers(struct igc_txring *txr)
{
struct igc_softc *sc = txr->sc;
struct igc_tx_buf *txbuf;
@ -1875,7 +1877,7 @@ fail:
int
igc_setup_transmit_structures(struct igc_softc *sc)
{
struct tx_ring *txr = sc->tx_rings;
struct igc_txring *txr = sc->tx_rings;
int i;
for (i = 0; i < sc->sc_nqueues; i++, txr++) {
@ -1895,7 +1897,7 @@ fail:
*
**********************************************************************/
int
igc_setup_transmit_ring(struct tx_ring *txr)
igc_setup_transmit_ring(struct igc_txring *txr)
{
struct igc_softc *sc = txr->sc;
@ -1927,7 +1929,7 @@ void
igc_initialize_transmit_unit(struct igc_softc *sc)
{
struct ifnet *ifp = &sc->sc_ac.ac_if;
struct tx_ring *txr;
struct igc_txring *txr;
struct igc_hw *hw = &sc->hw;
uint64_t bus_addr;
uint32_t tctl, txdctl = 0;
@ -1981,7 +1983,7 @@ igc_initialize_transmit_unit(struct igc_softc *sc)
void
igc_free_transmit_structures(struct igc_softc *sc)
{
struct tx_ring *txr = sc->tx_rings;
struct igc_txring *txr = sc->tx_rings;
int i;
for (i = 0; i < sc->sc_nqueues; i++, txr++)
@ -1994,7 +1996,7 @@ igc_free_transmit_structures(struct igc_softc *sc)
*
**********************************************************************/
void
igc_free_transmit_buffers(struct tx_ring *txr)
igc_free_transmit_buffers(struct igc_txring *txr)
{
struct igc_softc *sc = txr->sc;
struct igc_tx_buf *txbuf;
@ -2035,7 +2037,7 @@ igc_free_transmit_buffers(struct tx_ring *txr)
**********************************************************************/
int
igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
igc_tx_ctx_setup(struct igc_txring *txr, struct mbuf *mp, int prod,
uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
struct ether_extracted ext;
@ -2140,7 +2142,7 @@ igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
*
**********************************************************************/
int
igc_allocate_receive_buffers(struct rx_ring *rxr)
igc_allocate_receive_buffers(struct igc_rxring *rxr)
{
struct igc_softc *sc = rxr->sc;
struct igc_rx_buf *rxbuf;
@ -2183,7 +2185,7 @@ fail:
int
igc_setup_receive_structures(struct igc_softc *sc)
{
struct rx_ring *rxr = sc->rx_rings;
struct igc_rxring *rxr = sc->rx_rings;
int i;
for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
@ -2203,7 +2205,7 @@ fail:
*
**********************************************************************/
int
igc_setup_receive_ring(struct rx_ring *rxr)
igc_setup_receive_ring(struct igc_rxring *rxr)
{
struct igc_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->sc_ac.ac_if;
@ -2238,7 +2240,7 @@ igc_setup_receive_ring(struct rx_ring *rxr)
void
igc_initialize_receive_unit(struct igc_softc *sc)
{
struct rx_ring *rxr = sc->rx_rings;
struct igc_rxring *rxr = sc->rx_rings;
struct igc_hw *hw = &sc->hw;
uint32_t rctl, rxcsum, srrctl = 0;
int i;
@ -2342,7 +2344,7 @@ igc_initialize_receive_unit(struct igc_softc *sc)
void
igc_free_receive_structures(struct igc_softc *sc)
{
struct rx_ring *rxr;
struct igc_rxring *rxr;
int i;
for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
@ -2358,7 +2360,7 @@ igc_free_receive_structures(struct igc_softc *sc)
*
**********************************************************************/
void
igc_free_receive_buffers(struct rx_ring *rxr)
igc_free_receive_buffers(struct igc_rxring *rxr)
{
struct igc_softc *sc = rxr->sc;
struct igc_rx_buf *rxbuf;


@ -1,4 +1,4 @@
/* $OpenBSD: if_igc.h,v 1.3 2024/05/06 04:25:52 dlg Exp $ */
/* $OpenBSD: if_igc.h,v 1.4 2024/05/21 11:19:39 bluhm Exp $ */
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@ -250,14 +250,14 @@ struct igc_queue {
char name[16];
pci_intr_handle_t ih;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct igc_txring *txr;
struct igc_rxring *rxr;
};
/*
* The transmit ring, one per tx queue.
*/
struct tx_ring {
struct igc_txring {
struct igc_softc *sc;
struct ifqueue *ifq;
uint32_t me;
@ -273,7 +273,7 @@ struct tx_ring {
/*
* The Receive ring, one per rx queue.
*/
struct rx_ring {
struct igc_rxring {
struct igc_softc *sc;
struct ifiqueue *ifiq;
uint32_t me;
@ -316,8 +316,8 @@ struct igc_softc {
unsigned int sc_nqueues;
struct igc_queue *queues;
struct tx_ring *tx_rings;
struct rx_ring *rx_rings;
struct igc_txring *tx_rings;
struct igc_rxring *rx_rings;
/* Multicast array memory */
uint8_t *mta;


@ -1,4 +1,4 @@
/* $OpenBSD: if_ix.c,v 1.214 2024/05/13 01:15:51 jsg Exp $ */
/* $OpenBSD: if_ix.c,v 1.215 2024/05/21 11:19:39 bluhm Exp $ */
/******************************************************************************
@ -131,39 +131,39 @@ void ixgbe_config_delay_values(struct ix_softc *);
void ixgbe_add_media_types(struct ix_softc *);
void ixgbe_config_link(struct ix_softc *);
int ixgbe_allocate_transmit_buffers(struct tx_ring *);
int ixgbe_allocate_transmit_buffers(struct ix_txring *);
int ixgbe_setup_transmit_structures(struct ix_softc *);
int ixgbe_setup_transmit_ring(struct tx_ring *);
int ixgbe_setup_transmit_ring(struct ix_txring *);
void ixgbe_initialize_transmit_units(struct ix_softc *);
void ixgbe_free_transmit_structures(struct ix_softc *);
void ixgbe_free_transmit_buffers(struct tx_ring *);
void ixgbe_free_transmit_buffers(struct ix_txring *);
int ixgbe_allocate_receive_buffers(struct rx_ring *);
int ixgbe_allocate_receive_buffers(struct ix_rxring *);
int ixgbe_setup_receive_structures(struct ix_softc *);
int ixgbe_setup_receive_ring(struct rx_ring *);
int ixgbe_setup_receive_ring(struct ix_rxring *);
void ixgbe_initialize_receive_units(struct ix_softc *);
void ixgbe_free_receive_structures(struct ix_softc *);
void ixgbe_free_receive_buffers(struct rx_ring *);
void ixgbe_free_receive_buffers(struct ix_rxring *);
void ixgbe_initialize_rss_mapping(struct ix_softc *);
int ixgbe_rxfill(struct rx_ring *);
int ixgbe_rxfill(struct ix_rxring *);
void ixgbe_rxrefill(void *);
int ixgbe_intr(struct ix_softc *sc);
void ixgbe_enable_intr(struct ix_softc *);
void ixgbe_disable_intr(struct ix_softc *);
int ixgbe_txeof(struct tx_ring *);
int ixgbe_rxeof(struct rx_ring *);
int ixgbe_txeof(struct ix_txring *);
int ixgbe_rxeof(struct ix_rxring *);
void ixgbe_rx_offload(uint32_t, uint16_t, struct mbuf *);
void ixgbe_iff(struct ix_softc *);
void ixgbe_map_queue_statistics(struct ix_softc *);
void ixgbe_update_link_status(struct ix_softc *);
int ixgbe_get_buf(struct rx_ring *, int);
int ixgbe_encap(struct tx_ring *, struct mbuf *);
int ixgbe_get_buf(struct ix_rxring *, int);
int ixgbe_encap(struct ix_txring *, struct mbuf *);
int ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
struct ixgbe_dma_alloc *, int);
void ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
static int
ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
ixgbe_tx_ctx_setup(struct ix_txring *, struct mbuf *, uint32_t *,
uint32_t *);
void ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
void ixgbe_configure_ivars(struct ix_softc *);
@ -188,8 +188,8 @@ int ixgbe_queue_intr(void *);
#if NKSTAT > 0
static void ix_kstats(struct ix_softc *);
static void ix_rxq_kstats(struct ix_softc *, struct rx_ring *);
static void ix_txq_kstats(struct ix_softc *, struct tx_ring *);
static void ix_rxq_kstats(struct ix_softc *, struct ix_rxring *);
static void ix_txq_kstats(struct ix_softc *, struct ix_txring *);
static void ix_kstats_tick(void *);
#endif
@ -451,7 +451,7 @@ ixgbe_start(struct ifqueue *ifq)
{
struct ifnet *ifp = ifq->ifq_if;
struct ix_softc *sc = ifp->if_softc;
struct tx_ring *txr = ifq->ifq_softc;
struct ix_txring *txr = ifq->ifq_softc;
struct mbuf *m_head;
unsigned int head, free, used;
int post = 0;
@ -639,7 +639,7 @@ int
ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
{
struct if_rxring_info *ifr, ifr1;
struct rx_ring *rxr;
struct ix_rxring *rxr;
int error, i;
u_int n = 0;
@ -673,7 +673,7 @@ void
ixgbe_watchdog(struct ifnet * ifp)
{
struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
struct tx_ring *txr = sc->tx_rings;
struct ix_txring *txr = sc->tx_rings;
struct ixgbe_hw *hw = &sc->hw;
int tx_hang = FALSE;
int i;
@ -735,7 +735,7 @@ ixgbe_init(void *arg)
{
struct ix_softc *sc = (struct ix_softc *)arg;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct rx_ring *rxr = sc->rx_rings;
struct ix_rxring *rxr = sc->rx_rings;
uint32_t k, txdctl, rxdctl, rxctrl, mhadd, itr;
int i, s, err;
@ -1076,8 +1076,8 @@ ixgbe_queue_intr(void *vque)
struct ix_queue *que = vque;
struct ix_softc *sc = que->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct rx_ring *rxr = que->rxr;
struct tx_ring *txr = que->txr;
struct ix_rxring *rxr = que->rxr;
struct ix_txring *txr = que->txr;
if (ISSET(ifp->if_flags, IFF_RUNNING)) {
ixgbe_rxeof(rxr);
@ -1101,8 +1101,8 @@ ixgbe_legacy_intr(void *arg)
{
struct ix_softc *sc = (struct ix_softc *)arg;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct rx_ring *rxr = sc->rx_rings;
struct tx_ring *txr = sc->tx_rings;
struct ix_rxring *rxr = sc->rx_rings;
struct ix_txring *txr = sc->tx_rings;
int rv;
rv = ixgbe_intr(sc);
@ -1423,7 +1423,7 @@ ixgbe_media_change(struct ifnet *ifp)
**********************************************************************/
int
ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
ixgbe_encap(struct ix_txring *txr, struct mbuf *m_head)
{
struct ix_softc *sc = txr->sc;
uint32_t olinfo_status = 0, cmd_type_len;
@ -1953,8 +1953,8 @@ ixgbe_setup_interface(struct ix_softc *sc)
for (i = 0; i < sc->num_queues; i++) {
struct ifqueue *ifq = ifp->if_ifqs[i];
struct ifiqueue *ifiq = ifp->if_iqs[i];
struct tx_ring *txr = &sc->tx_rings[i];
struct rx_ring *rxr = &sc->rx_rings[i];
struct ix_txring *txr = &sc->tx_rings[i];
struct ix_rxring *rxr = &sc->rx_rings[i];
ifq->ifq_softc = txr;
txr->ifq = ifq;
@ -2142,8 +2142,8 @@ ixgbe_allocate_queues(struct ix_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
struct ix_queue *que;
struct tx_ring *txr;
struct rx_ring *rxr;
struct ix_txring *txr;
struct ix_rxring *rxr;
int rsize, tsize;
int txconf = 0, rxconf = 0, i;
@ -2156,14 +2156,14 @@ ixgbe_allocate_queues(struct ix_softc *sc)
/* Then allocate the TX ring struct memory */
if (!(sc->tx_rings = mallocarray(sc->num_queues,
sizeof(struct tx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
sizeof(struct ix_txring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
goto fail;
}
/* Next allocate the RX */
if (!(sc->rx_rings = mallocarray(sc->num_queues,
sizeof(struct rx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
sizeof(struct ix_rxring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
goto rx_fail;
}
@ -2235,10 +2235,10 @@ err_rx_desc:
err_tx_desc:
for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
ixgbe_dma_free(sc, &txr->txdma);
free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct rx_ring));
free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct ix_rxring));
sc->rx_rings = NULL;
rx_fail:
free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct tx_ring));
free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct ix_txring));
sc->tx_rings = NULL;
fail:
return (ENOMEM);
@ -2252,7 +2252,7 @@ fail:
*
**********************************************************************/
int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
ixgbe_allocate_transmit_buffers(struct ix_txring *txr)
{
struct ix_softc *sc = txr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
@ -2293,7 +2293,7 @@ fail:
*
**********************************************************************/
int
ixgbe_setup_transmit_ring(struct tx_ring *txr)
ixgbe_setup_transmit_ring(struct ix_txring *txr)
{
struct ix_softc *sc = txr->sc;
int error;
@ -2325,7 +2325,7 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
int
ixgbe_setup_transmit_structures(struct ix_softc *sc)
{
struct tx_ring *txr = sc->tx_rings;
struct ix_txring *txr = sc->tx_rings;
int i, error;
for (i = 0; i < sc->num_queues; i++, txr++) {
@ -2348,7 +2348,7 @@ void
ixgbe_initialize_transmit_units(struct ix_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
struct tx_ring *txr;
struct ix_txring *txr;
struct ixgbe_hw *hw = &sc->hw;
int i;
uint64_t tdba;
@ -2430,7 +2430,7 @@ ixgbe_initialize_transmit_units(struct ix_softc *sc)
void
ixgbe_free_transmit_structures(struct ix_softc *sc)
{
struct tx_ring *txr = sc->tx_rings;
struct ix_txring *txr = sc->tx_rings;
int i;
for (i = 0; i < sc->num_queues; i++, txr++)
@ -2443,7 +2443,7 @@ ixgbe_free_transmit_structures(struct ix_softc *sc)
*
**********************************************************************/
void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
ixgbe_free_transmit_buffers(struct ix_txring *txr)
{
struct ix_softc *sc = txr->sc;
struct ixgbe_tx_buf *tx_buffer;
@ -2561,7 +2561,7 @@ ixgbe_tx_offload(struct mbuf *mp, uint32_t *vlan_macip_lens,
}
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
ixgbe_tx_ctx_setup(struct ix_txring *txr, struct mbuf *mp,
uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
struct ixgbe_adv_tx_context_desc *TXD;
@ -2614,7 +2614,7 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
*
**********************************************************************/
int
ixgbe_txeof(struct tx_ring *txr)
ixgbe_txeof(struct ix_txring *txr)
{
struct ix_softc *sc = txr->sc;
struct ifqueue *ifq = txr->ifq;
@ -2684,7 +2684,7 @@ ixgbe_txeof(struct tx_ring *txr)
*
**********************************************************************/
int
ixgbe_get_buf(struct rx_ring *rxr, int i)
ixgbe_get_buf(struct ix_rxring *rxr, int i)
{
struct ix_softc *sc = rxr->sc;
struct ixgbe_rx_buf *rxbuf;
@ -2733,7 +2733,7 @@ ixgbe_get_buf(struct rx_ring *rxr, int i)
*
**********************************************************************/
int
ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
ixgbe_allocate_receive_buffers(struct ix_rxring *rxr)
{
struct ix_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
@ -2774,7 +2774,7 @@ fail:
*
**********************************************************************/
int
ixgbe_setup_receive_ring(struct rx_ring *rxr)
ixgbe_setup_receive_ring(struct ix_rxring *rxr)
{
struct ix_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
@ -2806,7 +2806,7 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
}
int
ixgbe_rxfill(struct rx_ring *rxr)
ixgbe_rxfill(struct ix_rxring *rxr)
{
struct ix_softc *sc = rxr->sc;
int post = 0;
@ -2842,7 +2842,7 @@ ixgbe_rxfill(struct rx_ring *rxr)
void
ixgbe_rxrefill(void *xrxr)
{
struct rx_ring *rxr = xrxr;
struct ix_rxring *rxr = xrxr;
struct ix_softc *sc = rxr->sc;
if (ixgbe_rxfill(rxr)) {
@ -2862,7 +2862,7 @@ ixgbe_rxrefill(void *xrxr)
int
ixgbe_setup_receive_structures(struct ix_softc *sc)
{
struct rx_ring *rxr = sc->rx_rings;
struct ix_rxring *rxr = sc->rx_rings;
int i;
for (i = 0; i < sc->num_queues; i++, rxr++)
@ -2886,7 +2886,7 @@ void
ixgbe_initialize_receive_units(struct ix_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
struct rx_ring *rxr = sc->rx_rings;
struct ix_rxring *rxr = sc->rx_rings;
struct ixgbe_hw *hw = &sc->hw;
uint32_t bufsz, fctrl, srrctl, rxcsum, rdrxctl;
uint32_t hlreg;
@ -3061,7 +3061,7 @@ ixgbe_initialize_rss_mapping(struct ix_softc *sc)
void
ixgbe_free_receive_structures(struct ix_softc *sc)
{
struct rx_ring *rxr;
struct ix_rxring *rxr;
int i;
for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
@ -3077,7 +3077,7 @@ ixgbe_free_receive_structures(struct ix_softc *sc)
*
**********************************************************************/
void
ixgbe_free_receive_buffers(struct rx_ring *rxr)
ixgbe_free_receive_buffers(struct ix_rxring *rxr)
{
struct ix_softc *sc;
struct ixgbe_rx_buf *rxbuf;
@ -3116,7 +3116,7 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
*
*********************************************************************/
int
ixgbe_rxeof(struct rx_ring *rxr)
ixgbe_rxeof(struct ix_rxring *rxr)
{
struct ix_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
@ -3850,7 +3850,7 @@ ix_kstats(struct ix_softc *sc)
}
static void
ix_rxq_kstats(struct ix_softc *sc, struct rx_ring *rxr)
ix_rxq_kstats(struct ix_softc *sc, struct ix_rxring *rxr)
{
struct ix_rxq_kstats *stats;
struct kstat *ks;
@ -3874,7 +3874,7 @@ ix_rxq_kstats(struct ix_softc *sc, struct rx_ring *rxr)
}
static void
ix_txq_kstats(struct ix_softc *sc, struct tx_ring *txr)
ix_txq_kstats(struct ix_softc *sc, struct ix_txring *txr)
{
struct ix_txq_kstats *stats;
struct kstat *ks;
@ -3980,7 +3980,7 @@ int
ix_rxq_kstats_read(struct kstat *ks)
{
struct ix_rxq_kstats *stats = ks->ks_data;
struct rx_ring *rxr = ks->ks_softc;
struct ix_rxring *rxr = ks->ks_softc;
struct ix_softc *sc = rxr->sc;
struct ixgbe_hw *hw = &sc->hw;
uint32_t i = rxr->me;
@ -4007,7 +4007,7 @@ int
ix_txq_kstats_read(struct kstat *ks)
{
struct ix_txq_kstats *stats = ks->ks_data;
struct tx_ring *txr = ks->ks_softc;
struct ix_txring *txr = ks->ks_softc;
struct ix_softc *sc = txr->sc;
struct ixgbe_hw *hw = &sc->hw;
uint32_t i = txr->me;


@ -1,4 +1,4 @@
/* $OpenBSD: if_ix.h,v 1.46 2023/08/04 10:58:27 jan Exp $ */
/* $OpenBSD: if_ix.h,v 1.47 2024/05/21 11:19:39 bluhm Exp $ */
/******************************************************************************
@ -158,14 +158,14 @@ struct ix_queue {
char name[8];
pci_intr_handle_t ih;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct ix_txring *txr;
struct ix_rxring *rxr;
};
/*
* The transmit ring, one per tx queue
*/
struct tx_ring {
struct ix_txring {
struct ix_softc *sc;
struct ifqueue *ifq;
uint32_t me;
@ -190,7 +190,7 @@ struct tx_ring {
/*
* The Receive ring, one per rx queue
*/
struct rx_ring {
struct ix_rxring {
struct ix_softc *sc;
struct ifiqueue *ifiq;
uint32_t me;
@ -262,14 +262,14 @@ struct ix_softc {
* Transmit rings:
* Allocated at run time, an array of rings.
*/
struct tx_ring *tx_rings;
struct ix_txring *tx_rings;
int num_tx_desc;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
struct ix_rxring *rx_rings;
uint64_t que_mask;
int num_rx_desc;


@ -1,4 +1,4 @@
/* $OpenBSD: if_mwx.c,v 1.2 2024/02/21 12:08:05 jsg Exp $ */
/* $OpenBSD: if_mwx.c,v 1.3 2024/05/20 21:22:43 martijn Exp $ */
/*
* Copyright (c) 2022 Claudio Jeker <claudio@openbsd.org>
* Copyright (c) 2021 MediaTek Inc.
@ -1426,7 +1426,7 @@ mwx_txwi_alloc(struct mwx_softc *sc, int count)
}
}
for (i = count; i >= MT_PACKET_ID_FIRST; i--)
for (i = count - 1; i >= MT_PACKET_ID_FIRST; i--)
LIST_INSERT_HEAD(&q->mt_freelist, &q->mt_data[i], mt_entry);
return 0;
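The loop bound change above is an off-by-one fix: mt_data[] holds count entries, so valid indices run 0 through count - 1, and the old loop's first pass at i == count inserted one element past the end of the array. A standalone illustration:

/*
 * An array of `count` items has no element at index `count`; a
 * downward walk must begin at count - 1 (the old loop overran by one).
 */
static int sum_tail(const int *data, int count, int first)
{
	int i, sum = 0;

	for (i = count - 1; i >= first; i--)
		sum += data[i];
	return sum;
}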


@ -1,4 +1,4 @@
/* $OpenBSD: if_rge.c,v 1.24 2024/04/13 23:44:11 jsg Exp $ */
/* $OpenBSD: if_rge.c,v 1.25 2024/05/20 01:51:32 kevlo Exp $ */
/*
* Copyright (c) 2019, 2020, 2023 Kevin Lo <kevlo@openbsd.org>
@ -209,7 +209,8 @@ rge_attach(struct device *parent, struct device *self, void *aux)
/*
* Allocate interrupt.
*/
if (pci_intr_map_msi(pa, &ih) == 0)
if (pci_intr_map_msix(pa, 0, &ih) == 0 ||
pci_intr_map_msi(pa, &ih) == 0)
sc->rge_flags |= RGE_FLAG_MSI;
else if (pci_intr_map(pa, &ih) != 0) {
printf(": couldn't map interrupt\n");


@ -1,4 +1,4 @@
/* $OpenBSD: if_vmx.c,v 1.85 2024/05/13 01:15:51 jsg Exp $ */
/* $OpenBSD: if_vmx.c,v 1.86 2024/05/21 19:49:06 jan Exp $ */
/*
* Copyright (c) 2013 Tsubai Masanari
@ -203,7 +203,7 @@ void vmxnet3_rxintr(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_rxfill_tick(void *);
void vmxnet3_rxfill(struct vmxnet3_rxring *);
void vmxnet3_iff(struct vmxnet3_softc *);
void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
void vmxnet3_rx_offload(struct vmxnet3_rxcompdesc *, struct mbuf *);
void vmxnet3_stop(struct ifnet *);
void vmxnet3_reset(struct vmxnet3_softc *);
int vmxnet3_init(struct vmxnet3_softc *);
@ -1127,14 +1127,8 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
}
m->m_pkthdr.len = m->m_len = len;
vmxnet3_rx_csum(rxcd, m);
#if NVLAN > 0
if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_VLAN)) {
m->m_flags |= M_VLANTAG;
m->m_pkthdr.ether_vtag = letoh32((rxcd->rxc_word2 >>
VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M);
}
#endif
vmxnet3_rx_offload(rxcd, m);
if (((letoh32(rxcd->rxc_word0) >> VMXNET3_RXC_RSSTYPE_S) &
VMXNET3_RXC_RSSTYPE_M) != VMXNET3_RXC_RSSTYPE_NONE) {
m->m_pkthdr.ph_flowid = letoh32(rxcd->rxc_word1);
@ -1215,22 +1209,39 @@ vmxnet3_iff(struct vmxnet3_softc *sc)
void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
vmxnet3_rx_offload(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
if (letoh32(rxcd->rxc_word0 & VMXNET3_RXC_NOCSUM))
/*
* VLAN Offload
*/
#if NVLAN > 0
if (ISSET(rxcd->rxc_word2, VMXNET3_RXC_VLAN)) {
SET(m->m_flags, M_VLANTAG);
m->m_pkthdr.ether_vtag = letoh32((rxcd->rxc_word2 >>
VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M);
}
#endif
/*
* Checksum Offload
*/
if (ISSET(rxcd->rxc_word0, VMXNET3_RXC_NOCSUM))
return;
if ((rxcd->rxc_word3 & (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK)) ==
(VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK))
m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
if (ISSET(rxcd->rxc_word3, VMXNET3_RXC_IPV4) &&
ISSET(rxcd->rxc_word3, VMXNET3_RXC_IPSUM_OK))
SET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK);
if (rxcd->rxc_word3 & VMXNET3_RXC_FRAGMENT)
if (ISSET(rxcd->rxc_word3, VMXNET3_RXC_FRAGMENT))
return;
if (rxcd->rxc_word3 & (VMXNET3_RXC_TCP | VMXNET3_RXC_UDP)) {
if (rxcd->rxc_word3 & VMXNET3_RXC_CSUM_OK)
m->m_pkthdr.csum_flags |=
M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
if (ISSET(rxcd->rxc_word3, VMXNET3_RXC_CSUM_OK)) {
if (ISSET(rxcd->rxc_word3, VMXNET3_RXC_TCP))
SET(m->m_pkthdr.csum_flags, M_TCP_CSUM_IN_OK);
else if (ISSET(rxcd->rxc_word3, VMXNET3_RXC_UDP))
SET(m->m_pkthdr.csum_flags, M_UDP_CSUM_IN_OK);
}
}
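The rewritten checksum block also fixes a flag bug: the old code OR'ed in both M_TCP_CSUM_IN_OK and M_UDP_CSUM_IN_OK whenever either protocol bit was present, while the new code sets only the flag for the protocol the descriptor actually validated. A standalone sketch of the corrected dispatch (flag values hypothetical):

#include <stdint.h>

#define CSUM_TCP_OK	0x1	/* hypothetical flag values */
#define CSUM_UDP_OK	0x2

/* Map a "checksum good" completion onto exactly one protocol flag. */
static uint32_t rx_csum_flags(int is_tcp, int is_udp, int csum_ok)
{
	if (!csum_ok)
		return 0;
	if (is_tcp)
		return CSUM_TCP_OK;
	if (is_udp)
		return CSUM_UDP_OK;
	return 0;
}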


@ -1,4 +1,4 @@
$OpenBSD: pcidevs,v 1.2074 2024/04/20 08:54:01 jsg Exp $
$OpenBSD: pcidevs,v 1.2075 2024/05/21 07:03:55 jsg Exp $
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
/*
@ -790,8 +790,8 @@ product AMD 19_1X_PCIE_1 0x14a5 19h/1xh PCIE
product AMD 19_1X_RCEC 0x14a6 19h/1xh RCEC
product AMD 19_1X_PCIE_2 0x14a7 19h/1xh PCIE
product AMD 19_1X_PCIE_3 0x14aa 19h/1xh PCIE
product AMD 19_1X_PCIE_4 0x14ac 19h/1xh PCIE
product AMD 19_1X_PCIE_5 0x14ab 19h/1xh PCIE
product AMD 19_1X_PCIE_4 0x14ab 19h/1xh PCIE
product AMD 19_1X_PCIE_5 0x14ac 19h/1xh PCIE
product AMD 19_1X_DF_1 0x14ad 19h/1xh Data Fabric
product AMD 19_1X_DF_2 0x14ae 19h/1xh Data Fabric
product AMD 19_1X_DF_3 0x14af 19h/1xh Data Fabric
@ -1177,8 +1177,8 @@ product AQUANTIA AQC107 0x07b1 AQC107
product AQUANTIA AQC108 0x08b1 AQC108
product AQUANTIA AQC109 0x09b1 AQC109
product AQUANTIA AQC111 0x11b1 AQC111
product AQUANTIA AQC112 0x12b1 AQC112
product AQUANTIA AQC116C 0x11c0 AQC116C
product AQUANTIA AQC112 0x12b1 AQC112
product AQUANTIA AQC115C 0x12c0 AQC115C
product AQUANTIA AQC113C 0x14c0 AQC113C
product AQUANTIA AQC113CA 0x34c0 AQC113CA
@@ -8800,10 +8800,10 @@ product REALTEK RTL8723AE 0x8723 8723AE
product REALTEK RTL8821AE 0x8821 8821AE
product REALTEK RTL8852AE 0x8852 8852AE
product REALTEK RTL8852AE_VT 0xa85a 8852AE-VT
product REALTEK RTL8852BE 0xb852 8852BE
product REALTEK RTL8852BE_2 0xb85b 8852BE
product REALTEK RTL8723BE 0xb723 8723BE
product REALTEK RTL8822BE 0xb822 8822BE
product REALTEK RTL8852BE 0xb852 8852BE
product REALTEK RTL8852BE_2 0xb85b 8852BE
product REALTEK RTL8821CE 0xc821 8821CE
product REALTEK RTL8822CE 0xc822 8822CE
product REALTEK RTL8852CE 0xc852 8852CE
@@ -9337,11 +9337,11 @@ product SYMBIOS YELLOWFIN_1 0x0701 Yellowfin
product SYMBIOS YELLOWFIN_2 0x0702 Yellowfin
product SYMBIOS 61C102 0x0901 61C102
product SYMBIOS 63C815 0x1000 63C815
product SYMBIOS 1030R 0x1030 53c1030R
product SYMBIOS MEGARAID_39XX 0x10e1 MegaRAID SAS39XX
product SYMBIOS MEGARAID_39XX_2 0x10e2 MegaRAID SAS39XX
product SYMBIOS MEGARAID_38XX 0x10e5 MegaRAID SAS38XX
product SYMBIOS MEGARAID_38XX_2 0x10e6 MegaRAID SAS38XX
product SYMBIOS 1030R 0x1030 53c1030R
product SYMBIOS MEGARAID 0x1960 MegaRAID
/* Packet Engines products */
@@ -9958,16 +9958,16 @@ product VORTEX GDT_8X22RZ 0x02f6 GDT8x22RZ
product VORTEX GDT_ICP 0x0300 ICP
product VORTEX GDT_ICP2 0x0301 ICP
/* Beijing WangXun Technology products */
product WANGXUN WX1860A2 0x0101 WX1860A2
product WANGXUN WX1860AL1 0x010b WX1860AL1
/* Nanjing QinHeng Electronics products */
product WCH CH352 0x3253 CH352
product WCH2 CH351 0x2273 CH351
product WCH2 CH382_2 0x3250 CH382
product WCH2 CH382_1 0x3253 CH382
/* Beijing WangXun Technology products */
product WANGXUN WX1860A2 0x0101 WX1860A2
product WANGXUN WX1860AL1 0x010b WX1860AL1
/* Western Digital products */
product WD WD33C193A 0x0193 WD33C193A
product WD WD33C196A 0x0196 WD33C196A


@@ -2,7 +2,7 @@
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: pcidevs,v 1.2074 2024/04/20 08:54:01 jsg Exp
* OpenBSD: pcidevs,v 1.2075 2024/05/21 07:03:55 jsg Exp
*/
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
@@ -795,8 +795,8 @@
#define PCI_PRODUCT_AMD_19_1X_RCEC 0x14a6 /* 19h/1xh RCEC */
#define PCI_PRODUCT_AMD_19_1X_PCIE_2 0x14a7 /* 19h/1xh PCIE */
#define PCI_PRODUCT_AMD_19_1X_PCIE_3 0x14aa /* 19h/1xh PCIE */
#define PCI_PRODUCT_AMD_19_1X_PCIE_4 0x14ac /* 19h/1xh PCIE */
#define PCI_PRODUCT_AMD_19_1X_PCIE_5 0x14ab /* 19h/1xh PCIE */
#define PCI_PRODUCT_AMD_19_1X_PCIE_4 0x14ab /* 19h/1xh PCIE */
#define PCI_PRODUCT_AMD_19_1X_PCIE_5 0x14ac /* 19h/1xh PCIE */
#define PCI_PRODUCT_AMD_19_1X_DF_1 0x14ad /* 19h/1xh Data Fabric */
#define PCI_PRODUCT_AMD_19_1X_DF_2 0x14ae /* 19h/1xh Data Fabric */
#define PCI_PRODUCT_AMD_19_1X_DF_3 0x14af /* 19h/1xh Data Fabric */
@@ -1182,8 +1182,8 @@
#define PCI_PRODUCT_AQUANTIA_AQC108 0x08b1 /* AQC108 */
#define PCI_PRODUCT_AQUANTIA_AQC109 0x09b1 /* AQC109 */
#define PCI_PRODUCT_AQUANTIA_AQC111 0x11b1 /* AQC111 */
#define PCI_PRODUCT_AQUANTIA_AQC112 0x12b1 /* AQC112 */
#define PCI_PRODUCT_AQUANTIA_AQC116C 0x11c0 /* AQC116C */
#define PCI_PRODUCT_AQUANTIA_AQC112 0x12b1 /* AQC112 */
#define PCI_PRODUCT_AQUANTIA_AQC115C 0x12c0 /* AQC115C */
#define PCI_PRODUCT_AQUANTIA_AQC113C 0x14c0 /* AQC113C */
#define PCI_PRODUCT_AQUANTIA_AQC113CA 0x34c0 /* AQC113CA */
@@ -8805,10 +8805,10 @@
#define PCI_PRODUCT_REALTEK_RTL8821AE 0x8821 /* 8821AE */
#define PCI_PRODUCT_REALTEK_RTL8852AE 0x8852 /* 8852AE */
#define PCI_PRODUCT_REALTEK_RTL8852AE_VT 0xa85a /* 8852AE-VT */
#define PCI_PRODUCT_REALTEK_RTL8852BE 0xb852 /* 8852BE */
#define PCI_PRODUCT_REALTEK_RTL8852BE_2 0xb85b /* 8852BE */
#define PCI_PRODUCT_REALTEK_RTL8723BE 0xb723 /* 8723BE */
#define PCI_PRODUCT_REALTEK_RTL8822BE 0xb822 /* 8822BE */
#define PCI_PRODUCT_REALTEK_RTL8852BE 0xb852 /* 8852BE */
#define PCI_PRODUCT_REALTEK_RTL8852BE_2 0xb85b /* 8852BE */
#define PCI_PRODUCT_REALTEK_RTL8821CE 0xc821 /* 8821CE */
#define PCI_PRODUCT_REALTEK_RTL8822CE 0xc822 /* 8822CE */
#define PCI_PRODUCT_REALTEK_RTL8852CE 0xc852 /* 8852CE */
@@ -9342,11 +9342,11 @@
#define PCI_PRODUCT_SYMBIOS_YELLOWFIN_2 0x0702 /* Yellowfin */
#define PCI_PRODUCT_SYMBIOS_61C102 0x0901 /* 61C102 */
#define PCI_PRODUCT_SYMBIOS_63C815 0x1000 /* 63C815 */
#define PCI_PRODUCT_SYMBIOS_1030R 0x1030 /* 53c1030R */
#define PCI_PRODUCT_SYMBIOS_MEGARAID_39XX 0x10e1 /* MegaRAID SAS39XX */
#define PCI_PRODUCT_SYMBIOS_MEGARAID_39XX_2 0x10e2 /* MegaRAID SAS39XX */
#define PCI_PRODUCT_SYMBIOS_MEGARAID_38XX 0x10e5 /* MegaRAID SAS38XX */
#define PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2 0x10e6 /* MegaRAID SAS38XX */
#define PCI_PRODUCT_SYMBIOS_1030R 0x1030 /* 53c1030R */
#define PCI_PRODUCT_SYMBIOS_MEGARAID 0x1960 /* MegaRAID */
/* Packet Engines products */
@@ -9963,16 +9963,16 @@
#define PCI_PRODUCT_VORTEX_GDT_ICP 0x0300 /* ICP */
#define PCI_PRODUCT_VORTEX_GDT_ICP2 0x0301 /* ICP */
/* Beijing WangXun Technology products */
#define PCI_PRODUCT_WANGXUN_WX1860A2 0x0101 /* WX1860A2 */
#define PCI_PRODUCT_WANGXUN_WX1860AL1 0x010b /* WX1860AL1 */
/* Nanjing QinHeng Electronics products */
#define PCI_PRODUCT_WCH_CH352 0x3253 /* CH352 */
#define PCI_PRODUCT_WCH2_CH351 0x2273 /* CH351 */
#define PCI_PRODUCT_WCH2_CH382_2 0x3250 /* CH382 */
#define PCI_PRODUCT_WCH2_CH382_1 0x3253 /* CH382 */
/* Beijing WangXun Technology products */
#define PCI_PRODUCT_WANGXUN_WX1860A2 0x0101 /* WX1860A2 */
#define PCI_PRODUCT_WANGXUN_WX1860AL1 0x010b /* WX1860AL1 */
/* Western Digital products */
#define PCI_PRODUCT_WD_WD33C193A 0x0193 /* WD33C193A */
#define PCI_PRODUCT_WD_WD33C196A 0x0196 /* WD33C196A */


@@ -2,7 +2,7 @@
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: pcidevs,v 1.2074 2024/04/20 08:54:01 jsg Exp
* OpenBSD: pcidevs,v 1.2075 2024/05/21 07:03:55 jsg Exp
*/
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
@@ -2911,14 +2911,14 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111,
"AQC111",
},
{
PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112,
"AQC112",
},
{
PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC116C,
"AQC116C",
},
{
PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112,
"AQC112",
},
{
PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC115C,
"AQC115C",
@@ -31767,14 +31767,6 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8852AE_VT,
"8852AE-VT",
},
{
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8852BE,
"8852BE",
},
{
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8852BE_2,
"8852BE",
},
{
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8723BE,
"8723BE",
@@ -31783,6 +31775,14 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8822BE,
"8822BE",
},
{
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8852BE,
"8852BE",
},
{
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8852BE_2,
"8852BE",
},
{
PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8821CE,
"8821CE",
@@ -33619,6 +33619,10 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_63C815,
"63C815",
},
{
PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_1030R,
"53c1030R",
},
{
PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_39XX,
"MegaRAID SAS39XX",
@@ -33635,10 +33639,6 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID_38XX_2,
"MegaRAID SAS38XX",
},
{
PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_1030R,
"53c1030R",
},
{
PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_MEGARAID,
"MegaRAID",
@@ -35827,6 +35827,14 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_VORTEX, PCI_PRODUCT_VORTEX_GDT_ICP2,
"ICP",
},
{
PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860A2,
"WX1860A2",
},
{
PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860AL1,
"WX1860AL1",
},
{
PCI_VENDOR_WCH, PCI_PRODUCT_WCH_CH352,
"CH352",
@@ -35843,14 +35851,6 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_WCH2, PCI_PRODUCT_WCH2_CH382_1,
"CH382",
},
{
PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860A2,
"WX1860A2",
},
{
PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860AL1,
"WX1860AL1",
},
{
PCI_VENDOR_WD, PCI_PRODUCT_WD_WD33C193A,
"WD33C193A",