sync with OpenBSD -current

parent d47112308d
commit 69f13bbae9

40 changed files with 422 additions and 153 deletions
@@ -1,4 +1,4 @@
-/*	$OpenBSD: ncr53c9x.c,v 1.80 2022/04/16 19:19:59 naddy Exp $	*/
+/*	$OpenBSD: ncr53c9x.c,v 1.81 2024/04/03 18:41:38 miod Exp $	*/
 /*	$NetBSD: ncr53c9x.c,v 1.56 2000/11/30 14:41:46 thorpej Exp $	*/
 
 /*
@@ -596,13 +596,13 @@ ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
	 * always possible that the interrupt may never happen.
	 */
	if ((ecb->xs->flags & SCSI_POLL) == 0) {
-		int timeout = ecb->timeout;
+		int timeout = ecb->xs->timeout;
 
		if (timeout > 1000000)
			timeout = (timeout / 1000) * hz;
		else
			timeout = (timeout * hz) / 1000;
-		timeout_add(&ecb->to, timeout);
+		timeout_add(&ecb->xs->stimeout, timeout);
	}
 
	/*
@@ -741,7 +741,6 @@ ncr53c9x_get_ecb(void *null)
	if (ecb == NULL)
		return (NULL);
 
-	timeout_set(&ecb->to, ncr53c9x_timeout, ecb);
	ecb->flags |= ECB_ALLOC;
 
	return (ecb);
@@ -842,7 +841,7 @@ ncr53c9x_scsi_cmd(struct scsi_xfer *xs)
	/* Initialize ecb */
	ecb = xs->io;
	ecb->xs = xs;
-	ecb->timeout = xs->timeout;
+	timeout_set(&xs->stimeout, ncr53c9x_timeout, ecb);
 
	if (flags & SCSI_RESET) {
		ecb->flags |= ECB_RESET;
@@ -869,9 +868,9 @@ ncr53c9x_scsi_cmd(struct scsi_xfer *xs)
		return;
 
	/* Not allowed to use interrupts, use polling instead */
-	if (ncr53c9x_poll(sc, xs, ecb->timeout)) {
+	if (ncr53c9x_poll(sc, xs, xs->timeout)) {
		ncr53c9x_timeout(ecb);
-		if (ncr53c9x_poll(sc, xs, ecb->timeout))
+		if (ncr53c9x_poll(sc, xs, xs->timeout))
			ncr53c9x_timeout(ecb);
	}
 }
@@ -1070,7 +1069,7 @@ ncr53c9x_sense(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
	ecb->daddr = (char *)&xs->sense;
	ecb->dleft = sizeof(struct scsi_sense_data);
	ecb->flags |= ECB_SENSE;
-	ecb->timeout = NCR_SENSE_TIMEOUT;
+	xs->timeout = NCR_SENSE_TIMEOUT;
	ti->senses++;
	li = TINFO_LUN(ti, lun);
	if (li->busy) li->busy = 0;
@@ -1101,7 +1100,7 @@ ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
 
	NCR_TRACE(("[ncr53c9x_done(error:%x)] ", xs->error));
 
-	timeout_del(&ecb->to);
+	timeout_del(&ecb->xs->stimeout);
 
	if (ecb->stat == SCSI_QUEUE_FULL) {
		/*
@@ -2175,7 +2174,7 @@ again:
				goto reset;
			}
			printf("sending REQUEST SENSE\n");
-			timeout_del(&ecb->to);
+			timeout_del(&ecb->xs->stimeout);
			ncr53c9x_sense(sc, ecb);
			goto out;
		}
@@ -2255,7 +2254,7 @@ printf("<<RESELECT CONT'd>>");
			 */
			if (sc->sc_state == NCR_SELECTING) {
				NCR_MISC(("backoff selector "));
-				timeout_del(&ecb->to);
+				timeout_del(&ecb->xs->stimeout);
				ncr53c9x_dequeue(sc, ecb);
				TAILQ_INSERT_HEAD(&sc->ready_list, ecb, chain);
				ecb->flags |= ECB_READY;
@@ -2693,11 +2692,11 @@ ncr53c9x_abort(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
 {
 
	/* 2 secs for the abort */
-	ecb->timeout = NCR_ABORT_TIMEOUT;
+	ecb->xs->timeout = NCR_ABORT_TIMEOUT;
	ecb->flags |= ECB_ABORT;
 
	if (ecb == sc->sc_nexus) {
-		int timeout = ecb->timeout;
+		int timeout = ecb->xs->timeout;
 
		/*
		 * If we're still selecting, the message will be scheduled
@@ -2713,7 +2712,7 @@ ncr53c9x_abort(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
			timeout = (timeout / 1000) * hz;
		else
			timeout = (timeout * hz) / 1000;
-		timeout_add(&ecb->to, timeout);
+		timeout_add(&ecb->xs->stimeout, timeout);
	} else {
		/*
		 * Just leave the command where it is.
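Both timeout_add() hunks above keep the same millisecond-to-tick scaling, whose point is 32-bit overflow avoidance. A minimal standalone sketch of that conversion (illustrative helper name, not part of the tree; hz is the kernel tick rate):

static int
timeout_ms_to_ticks(int ms)
{
	if (ms > 1000000)
		return (ms / 1000) * hz;	/* divide first: "ms * hz" would overflow an int */
	else
		return (ms * hz) / 1000;	/* multiply first: keeps sub-second precision */
}

With hz = 100, a 30000 ms timeout becomes (30000 * 100) / 1000 = 3000 ticks.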
@@ -1,4 +1,4 @@
-/*	$OpenBSD: ncr53c9xvar.h,v 1.24 2020/07/22 13:16:04 krw Exp $	*/
+/*	$OpenBSD: ncr53c9xvar.h,v 1.25 2024/04/03 18:41:38 miod Exp $	*/
 /*	$NetBSD: ncr53c9xvar.h,v 1.13 1998/05/26 23:17:34 thorpej Exp $	*/
 
 /*-
@@ -55,8 +55,6 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <sys/timeout.h>
-
 /* Set this to 1 for normal debug, or 2 for per-target tracing. */
 #if !defined(SMALL_KERNEL)
 #define NCR53C9X_DEBUG 1
@@ -105,8 +103,6 @@ struct ncr53c9x_ecb {
 #define ECB_ABORT	0x40
 #define ECB_RESET	0x80
 #define ECB_TENTATIVE_DONE	0x100
-	int timeout;
-	struct timeout to;
 
	struct {
		u_char	msg[3];			/* Selection Id msg */
@@ -1,4 +1,4 @@
-/*	$OpenBSD: ipmi.c,v 1.118 2022/04/08 13:13:14 mbuhl Exp $ */
+/*	$OpenBSD: ipmi.c,v 1.119 2024/04/03 18:32:47 gkoehler Exp $ */
 
 /*
  * Copyright (c) 2015 Masao Uebayashi
@@ -1596,7 +1596,8 @@ ipmi_attach_common(struct ipmi_softc *sc, struct ipmi_attach_args *ia)
	c->c_sc = sc;
	c->c_ccode = -1;
 
-	sc->sc_cmd_taskq = taskq_create("ipmicmd", 1, IPL_NONE, TASKQ_MPSAFE);
+	sc->sc_cmd_taskq = taskq_create("ipmicmd", 1, IPL_MPFLOOR,
+	    TASKQ_MPSAFE);
 }
 
 int
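For reference, taskq_create(9) takes the queue name, worker-thread count, the interrupt priority level protecting the queue, and flags; raising IPL_NONE to IPL_MPFLOOR keeps the queue's internal lock safe against callers that run at MP-floor level. A hedged usage sketch (hypothetical task and function names, not from this diff):

struct taskq *tq;
struct task reset_task;			/* hypothetical */

void example_reset(void *);

void
example_init(void *softc)
{
	tq = taskq_create("example", 1, IPL_MPFLOOR, TASKQ_MPSAFE);
	task_set(&reset_task, example_reset, softc);
	task_add(tq, &reset_task);
}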
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
  */
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
 {
+	int r;
+
	if (bo->kfd_bo)
-		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
						 addr, amdgpu_bo_size(bo),
						 &amdgpu_hmm_hsa_ops);
-	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
-					    amdgpu_bo_size(bo),
-					    &amdgpu_hmm_gfx_ops);
+	else
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+						 amdgpu_bo_size(bo),
+						 &amdgpu_hmm_gfx_ops);
+	if (r)
+		/*
+		 * Make sure amdgpu_hmm_unregister() doesn't call
+		 * mmu_interval_notifier_remove() when the notifier isn't properly
+		 * initialized.
+		 */
+		bo->notifier.mm = NULL;
+
+	return r;
 }
 
 /**
@@ -520,46 +520,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
 {
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	volatile u32 *mqd;
-	int r;
+	u32 *kbuf;
+	int r, i;
	uint32_t value, result;
 
	if (*pos & 3 || size & 3)
		return -EINVAL;
 
-	result = 0;
+	kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
 
	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
-		return r;
+		goto err_free;
 
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
-	if (r) {
-		amdgpu_bo_unreserve(ring->mqd_obj);
-		return r;
-	}
+	if (r)
+		goto err_unreserve;
 
+	/*
+	 * Copy to local buffer to avoid put_user(), which might fault
+	 * and acquire mmap_sem, under reservation_ww_class_mutex.
+	 */
+	for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
+		kbuf[i] = mqd[i];
+
+	amdgpu_bo_kunmap(ring->mqd_obj);
+	amdgpu_bo_unreserve(ring->mqd_obj);
+
+	result = 0;
	while (size) {
		if (*pos >= ring->mqd_size)
-			goto done;
+			break;
 
-		value = mqd[*pos/4];
+		value = kbuf[*pos/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
-			goto done;
+			goto err_free;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}
 
-done:
-	amdgpu_bo_kunmap(ring->mqd_obj);
-	mqd = NULL;
-	amdgpu_bo_unreserve(ring->mqd_obj);
-	if (r)
-		return r;
-
+	kfree(kbuf);
	return result;
+
+err_unreserve:
+	amdgpu_bo_unreserve(ring->mqd_obj);
+err_free:
+	kfree(kbuf);
+	return r;
 }
 
 static const struct file_operations amdgpu_debugfs_mqd_fops = {
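The rewrite above is an instance of a general rule: put_user() may fault and take mmap locks, so it must not run while reservation_ww_class_mutex is held. Instead the data is snapshotted into a kmalloc() buffer under the lock, the lock is dropped, and only then copied out. Reduced to a sketch (generic names; the lock type here is illustrative):

static ssize_t
copy_out_snapshot(struct mutex *lock, const u32 *src, size_t words,
		  char __user *buf)
{
	u32 *kbuf;
	size_t i;

	kbuf = kmalloc(words * sizeof(u32), GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(lock);		/* snapshot under the lock... */
	for (i = 0; i < words; i++)
		kbuf[i] = src[i];
	mutex_unlock(lock);		/* ...and drop it before faulting */

	if (copy_to_user(buf, kbuf, words * sizeof(u32))) {
		kfree(kbuf);
		return -EFAULT;
	}
	kfree(kbuf);
	return words * sizeof(u32);
}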
@@ -876,6 +876,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
+	gtt->bound = true;
 }
 
 /*
@@ -1466,7 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
 
 static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
 {
-	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+	return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
 }
@@ -6125,9 +6125,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-	else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
-			stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
-			stream->signal == SIGNAL_TYPE_EDP) {
+
+	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
@@ -6143,9 +6142,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
			tf = TRANSFER_FUNC_GAMMA_22;
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-
-		if (stream->link->psr_settings.psr_feature_enabled)
-			aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
 finish:
	dc_sink_release(sink);
@@ -10753,18 +10751,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
	if (!adev->dm.freesync_module)
		goto update;
 
-	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-	    || sink->sink_signal == SIGNAL_TYPE_EDP) {
+	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
		bool edid_check_required = false;
 
-		if (edid) {
-			edid_check_required = is_dp_capable_without_timing_msa(
-						adev->dm.dc,
-						amdgpu_dm_connector);
+		if (is_dp_capable_without_timing_msa(adev->dm.dc,
+						     amdgpu_dm_connector)) {
+			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+				freesync_capable = true;
+				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+			} else {
+				edid_check_required = edid->version > 1 ||
						      (edid->version == 1 &&
						       edid->revision > 1);
+			}
		}
 
-		if (edid_check_required == true && (edid->version > 1 ||
-		   (edid->version == 1 && edid->revision > 1))) {
+		if (edid_check_required) {
			for (i = 0; i < 4; i++) {
 
				timing = &edid->detailed_timings[i];
@@ -10784,14 +10788,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				if (range->flags != 1)
					continue;
 
-				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
-				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
-				amdgpu_dm_connector->pixel_clock_mhz =
-					range->pixel_clock_mhz * 10;
-
				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
 
+				if (edid->revision >= 4) {
+					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+						connector->display_info.monitor_range.min_vfreq += 255;
+					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+						connector->display_info.monitor_range.max_vfreq += 255;
+				}
+
+				amdgpu_dm_connector->min_vfreq =
+					connector->display_info.monitor_range.min_vfreq;
+				amdgpu_dm_connector->max_vfreq =
+					connector->display_info.monitor_range.max_vfreq;
+				amdgpu_dm_connector->pixel_clock_mhz =
+					range->pixel_clock_mhz * 10;
+
				break;
			}
 
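Worked example for the new range-offset handling above: EDID 1.4 range descriptors store refresh limits in single bytes, so a display with a 300 Hz maximum encodes max_vfreq as 45 and sets DRM_EDID_RANGE_OFFSET_MAX_VFREQ; the added code recovers 45 + 255 = 300 Hz. (Illustrative numbers, not taken from the diff.)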
@@ -619,10 +619,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
	if (pipe_ctx == NULL)
		return;
 
-	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
				pipe_ctx->stream_res.stream_enc,
				enable);
+
+		/* Wait for two frame to make sure AV mute is sent out */
+		if (enable) {
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+		}
+	}
 }
 
 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
@@ -142,6 +142,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
 {
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
+	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+		     OPTC_SEG0_SRC_SEL, 0xf,
+		     OPTC_SEG1_SRC_SEL, 0xf,
+		     OPTC_SEG2_SRC_SEL, 0xf,
+		     OPTC_SEG3_SRC_SEL, 0xf,
+		     OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+	REG_UPDATE(OPTC_MEMORY_CONFIG,
+		   OPTC_MEM_SEL, 0);
+
	/* disable otg request until end of the first line
	 * in the vertical blank region
	 */
@@ -174,6 +184,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
 {
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
+	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+		     OPTC_SEG0_SRC_SEL, 0xf,
+		     OPTC_SEG1_SRC_SEL, 0xf,
+		     OPTC_SEG2_SRC_SEL, 0xf,
+		     OPTC_SEG3_SRC_SEL, 0xf,
+		     OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
 }
 
@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
 
	if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
@@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
	}
 
	/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
-	if (stream->link->psr_settings.psr_feature_enabled) {
-		if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
-			vsc_packet_revision = vsc_packet_rev4;
-		else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
-			vsc_packet_revision = vsc_packet_rev2;
-	}
-
-	if (stream->link->replay_settings.config.replay_supported)
+	if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+		vsc_packet_revision = vsc_packet_rev4;
+	else if (stream->link->replay_settings.config.replay_supported)
		vsc_packet_revision = vsc_packet_rev4;
+	else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+		vsc_packet_revision = vsc_packet_rev2;
 
	/* Update to revision 5 for extended colorimetry support */
	if (stream->use_vsc_sdp_for_colorimetry)
@@ -2397,6 +2397,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 {
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
+	u32 pwm_mode;
	int value;
 
	if (amdgpu_in_reset(adev))
@@ -2408,13 +2409,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
	if (err)
		return err;
 
+	if (value == 0)
+		pwm_mode = AMD_FAN_CTRL_NONE;
+	else if (value == 1)
+		pwm_mode = AMD_FAN_CTRL_MANUAL;
+	else if (value == 2)
+		pwm_mode = AMD_FAN_CTRL_AUTO;
+	else
+		return -EINVAL;
+
	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}
 
-	ret = amdgpu_dpm_set_fan_control_mode(adev, value);
+	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -27,8 +27,9 @@
 #include <linux/mutex.h>
 
 #include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_debugfs.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_file.h>
 #include <drm/drm_of.h>
@@ -1213,6 +1214,47 @@ int drm_bridge_get_modes(struct drm_bridge *bridge,
 }
 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
 
+/**
+ * drm_bridge_edid_read - read the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
+ * the EDID and return it. Otherwise return NULL.
+ *
+ * If &drm_bridge_funcs.edid_read is not set, fall back to using
+ * drm_bridge_get_edid() and wrapping it in struct drm_edid.
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or NULL otherwise.
+ */
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+					    struct drm_connector *connector)
+{
+	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+		return NULL;
+
+	/* Transitional: Fall back to ->get_edid. */
+	if (!bridge->funcs->edid_read) {
+		const struct drm_edid *drm_edid;
+		struct edid *edid;
+
+		edid = drm_bridge_get_edid(bridge, connector);
+		if (!edid)
+			return NULL;
+
+		drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
+
+		kfree(edid);
+
+		return drm_edid;
+	}
+
+	return bridge->funcs->edid_read(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
+
 /**
  * drm_bridge_get_edid - get the EDID data of the connected display
  * @bridge: bridge control structure
@@ -1222,6 +1264,8 @@ EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
  * get the EDID and return it. Otherwise return NULL.
  *
+ * Deprecated. Prefer using drm_bridge_edid_read().
+ *
  * RETURNS:
  * The retrieved EDID on success, or NULL otherwise.
  */
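Grounded in the kernel-doc added above: a caller should verify the output connection first and free the result with drm_edid_free(). A hedged consumer sketch (the function name is hypothetical):

static void
example_probe_edid(struct drm_bridge *bridge,
		   struct drm_connector *connector)
{
	const struct drm_edid *drm_edid;

	if (drm_bridge_detect(bridge) != connector_status_connected)
		return;

	drm_edid = drm_bridge_edid_read(bridge, connector);
	if (!drm_edid)
		return;

	/* ... use the EDID, e.g. to update the connector's modes ... */

	drm_edid_free(drm_edid);
}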
@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
  * The modes probed from the panel are automatically added to the connector
  * that the panel is attached to.
  *
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
  */
 int drm_panel_get_modes(struct drm_panel *panel,
			struct drm_connector *connector)
 {
	if (!panel)
-		return -EINVAL;
+		return 0;
 
-	if (panel->funcs && panel->funcs->get_modes)
-		return panel->funcs->get_modes(panel, connector);
+	if (panel->funcs && panel->funcs->get_modes) {
+		int num;
+
+		num = panel->funcs->get_modes(panel, connector);
+		if (num > 0)
+			return num;
+	}
 
-	return -EOPNOTSUPP;
+	return 0;
 }
 EXPORT_SYMBOL(drm_panel_get_modes);
 
@@ -419,6 +419,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
 
	count = connector_funcs->get_modes(connector);
 
+	/* The .get_modes() callback should not return negative values. */
+	if (count < 0) {
+		drm_err(connector->dev, ".get_modes() returned %pe\n",
+			ERR_PTR(count));
+		count = 0;
+	}
+
	/*
	 * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
	 * override/firmware EDID.
@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
	}
 
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
-	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
	/* ensure all panel commands dispatched before enabling transcoder */
	wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
	/* step6d: enable dsi transcoder */
	gen11_dsi_enable_transcoder(encoder);
 
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
	/* step7: enable backlight */
	intel_backlight_enable(crtc_state, conn_state);
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -1945,16 +1945,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
 * these devices we split the init OTP sequence into a deassert sequence and
 * the actual init OTP part.
 */
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
-				 struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+				     struct intel_panel *panel)
 {
	u8 *init_otp;
	int len;
 
-	/* Limit this to VLV for now. */
-	if (!IS_VALLEYVIEW(i915))
-		return;
-
	/* Limit this to v1 vid-mode sequences */
	if (panel->vbt.dsi.config->is_cmd_mode ||
	    panel->vbt.dsi.seq_version != 1)
@@ -1990,6 +1986,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
	panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
 }
 
+/*
+ * Some machines (eg. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ *   (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ *       or examine the contents of the sequences to
+ *       avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+				     struct intel_panel *panel)
+{
+	if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+	    panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+		drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+		swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+		     panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+	}
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+				 struct intel_panel *panel)
+{
+	if (DISPLAY_VER(i915) >= 11)
+		icl_fixup_mipi_sequences(i915, panel);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_fixup_mipi_sequences(i915, panel);
+}
+
 static void
 parse_mipi_sequence(struct drm_i915_private *i915,
		    struct intel_panel *panel)
@@ -3330,6 +3361,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
 {
	const struct child_device_config *child = &devdata->child;
 
+	if (!devdata)
+		return false;
+
	if (!intel_bios_encoder_supports_dp(devdata) ||
	    !intel_bios_encoder_supports_hdmi(devdata))
		return false;
 
@@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
 
-	return intel_port_to_phy(i915, dig_port->base.port);
+	/*
+	 * FIXME should we care about the (VBT defined) dig_port->aux_ch
+	 * relationship or should this be purely defined by the hardware layout?
+	 * Currently if the port doesn't appear in the VBT, or if it's declared
+	 * as HDMI-only and routed to a combo PHY, the encoder either won't be
+	 * present at all or it will not have an aux_ch assigned.
+	 */
+	return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
 }
 
 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 
	intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
 
-	if (DISPLAY_VER(dev_priv) < 12)
+	/* FIXME this is a mess */
+	if (phy != PHY_NONE)
		intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
			     0, ICL_LANE_ENABLE_AUX);
 
@@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 
	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 
-	intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
+	/* FIXME this is a mess */
+	if (phy != PHY_NONE)
+		intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+			     ICL_LANE_ENABLE_AUX, 0);
 
	intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
 
@@ -2462,7 +2462,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
 static bool
 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
 {
-	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+	return ((IS_ELKHARTLAKE(i915) &&
		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
		IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
		i915->display.dpll.ref_clks.nssc == 38400;
@@ -379,6 +379,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
	GEM_WARN_ON(obj->userptr.page_ref);
 
+	if (!obj->userptr.notifier.mm)
+		return;
+
	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
 }
@@ -278,9 +278,6 @@ static int __engine_park(struct intel_wakeref *wf)
	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);
 
-	/* Must be reset upon idling, or we may miss the busy wakeup. */
-	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
	if (engine->park)
		engine->park(engine);
 
@@ -3279,6 +3279,9 @@ static void execlists_park(struct intel_engine_cs *engine)
 {
	cancel_timer(&engine->execlists.timer);
	cancel_timer(&engine->execlists.preempt);
+
+	/* Reset upon idling, or we may delay the busy wakeup. */
+	WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
 }
 
 static void add_to_engine(struct i915_request *rq)
@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
	struct intel_uncore *uncore = ddat->uncore;
	intel_wakeref_t wakeref;
 
-	mutex_lock(&hwmon->hwmon_lock);
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
+		mutex_lock(&hwmon->hwmon_lock);
 
-	with_intel_runtime_pm(uncore->rpm, wakeref)
		intel_uncore_rmw(uncore, reg, clear, set);
 
-	mutex_unlock(&hwmon->hwmon_lock);
+		mutex_unlock(&hwmon->hwmon_lock);
+	}
 }
 
 /*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
	else
		rgaddr = hwmon->rg.energy_status_all;
 
-	mutex_lock(&hwmon->hwmon_lock);
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
+		mutex_lock(&hwmon->hwmon_lock);
 
-	with_intel_runtime_pm(uncore->rpm, wakeref)
		reg_val = intel_uncore_read(uncore, rgaddr);
 
-	if (reg_val >= ei->reg_val_prev)
-		ei->accum_energy += reg_val - ei->reg_val_prev;
-	else
-		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
-	ei->reg_val_prev = reg_val;
+		if (reg_val >= ei->reg_val_prev)
+			ei->accum_energy += reg_val - ei->reg_val_prev;
+		else
+			ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+		ei->reg_val_prev = reg_val;
 
-	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
-				  hwmon->scl_shift_energy);
-	mutex_unlock(&hwmon->hwmon_lock);
+		*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+					  hwmon->scl_shift_energy);
+		mutex_unlock(&hwmon->hwmon_lock);
+	}
 }
 
 static ssize_t
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
 
	/* Block waiting for GuC reset to complete when needed */
	for (;;) {
+		wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
		mutex_lock(&hwmon->hwmon_lock);
 
		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
		}
 
		mutex_unlock(&hwmon->hwmon_lock);
+		intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
 
		schedule();
	}
	finish_wait(&ddat->waitq, &wait);
	if (ret)
-		goto unlock;
+		goto exit;
 
-	wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
-
	/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
	if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
	intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
			 PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
 exit:
-	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
	mutex_unlock(&hwmon->hwmon_lock);
+	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
	return ret;
 }
 
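All of the hwmon hunks above apply one lock-ordering rule: acquire the runtime-PM wakeref (outer resource, may sleep to resume the device) before hwmon_lock, and release in reverse order, so the mutex is never held across a runtime resume. Reduced to a sketch (illustrative function name, using the i915 APIs shown in the diff):

static void
example_locked_rmw(struct intel_uncore *uncore, struct mutex *lock,
		   i915_reg_t reg, u32 clear, u32 set)
{
	intel_wakeref_t wakeref;

	/* Outer: runtime PM first, it may sleep to wake the device. */
	with_intel_runtime_pm(uncore->rpm, wakeref) {
		mutex_lock(lock);	/* inner: short register critical section */
		intel_uncore_rmw(uncore, reg, clear, set);
		mutex_unlock(lock);
	}
}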
@@ -7,7 +7,8 @@
 #include <machine/pte.h>
 #include <linux/types.h>
 
-#define pgprot_val(v)	(v)
+#define pgprot_val(p)	(p)
+#define pgprot_decrypted(p)	(p)
 #define PAGE_KERNEL	0
 #define PAGE_KERNEL_IO	0
 
@@ -555,6 +555,37 @@
	int (*get_modes)(struct drm_bridge *bridge,
			 struct drm_connector *connector);
 
+	/**
+	 * @edid_read:
+	 *
+	 * Read the EDID data of the connected display.
+	 *
+	 * The @edid_read callback is the preferred way of reporting mode
+	 * information for a display connected to the bridge output. Bridges
+	 * that support reading EDID shall implement this callback and leave
+	 * the @get_modes callback unimplemented.
+	 *
+	 * The caller of this operation shall first verify the output
+	 * connection status and refrain from reading EDID from a disconnected
+	 * output.
+	 *
+	 * This callback is optional. Bridges that implement it shall set the
+	 * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
+	 *
+	 * The connector parameter shall be used for the sole purpose of EDID
+	 * retrieval, and shall not be stored internally by bridge drivers for
+	 * future usage.
+	 *
+	 * RETURNS:
+	 *
+	 * An edid structure newly allocated with drm_edid_alloc() or returned
+	 * from drm_edid_read() family of functions on success, or NULL
+	 * otherwise. The caller is responsible for freeing the returned edid
+	 * structure with drm_edid_free().
+	 */
+	const struct drm_edid *(*edid_read)(struct drm_bridge *bridge,
+					    struct drm_connector *connector);
+
	/**
	 * @get_edid:
	 *
@@ -888,6 +919,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
 enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
 int drm_bridge_get_modes(struct drm_bridge *bridge,
			 struct drm_connector *connector);
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+					    struct drm_connector *connector);
 struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
				 struct drm_connector *connector);
 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
@@ -898,7 +898,8 @@ struct drm_connector_helper_funcs {
	 *
	 * RETURNS:
	 *
-	 * The number of modes added by calling drm_mode_probed_add().
+	 * The number of modes added by calling drm_mode_probed_add(). Return 0
+	 * on failures (no modes) instead of negative error codes.
	 */
	int (*get_modes)(struct drm_connector *connector);
 
@@ -81,6 +81,12 @@ struct ttm_tt {
	 *    page_flags = TTM_TT_FLAG_EXTERNAL |
	 *                 TTM_TT_FLAG_EXTERNAL_MAPPABLE;
	 *
+	 * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as
+	 * not encrypted. The framework will try to match what the dma layer
+	 * is doing, but note that it is a little fragile because ttm page
+	 * fault handling abuses the DMA api a bit and dma_map_attrs can't be
+	 * used to assure pgprot always matches.
+	 *
	 * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
	 * set by TTM after ttm_tt_populate() has successfully returned, and is
	 * then unset when TTM calls ttm_tt_unpopulate().
@@ -89,8 +95,9 @@ struct ttm_tt {
 #define TTM_TT_FLAG_ZERO_ALLOC		BIT(1)
 #define TTM_TT_FLAG_EXTERNAL		BIT(2)
 #define TTM_TT_FLAG_EXTERNAL_MAPPABLE	BIT(3)
+#define TTM_TT_FLAG_DECRYPTED		BIT(4)
 
-#define TTM_TT_FLAG_PRIV_POPULATED	BIT(4)
+#define TTM_TT_FLAG_PRIV_POPULATED	BIT(5)
	uint32_t page_flags;
	/** @num_pages: Number of pages in the page array. */
	uint32_t num_pages;
@@ -0,0 +1,16 @@
+/* Public domain. */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+
+#define CC_ATTR_GUEST_MEM_ENCRYPT	0
+
+static inline bool
+cc_platform_has(int x)
+{
+	return false;
+}
+
+#endif
@@ -296,7 +296,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
	enum ttm_caching caching;
 
	man = ttm_manager_type(bo->bdev, res->mem_type);
-	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+	if (man->use_tt) {
+		caching = bo->ttm->caching;
+		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+			tmp = pgprot_decrypted(tmp);
+	} else {
+		caching = res->bus.caching;
+	}
 
	return ttm_prot_from_caching(caching, tmp);
 }
@@ -348,6 +354,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_resource_manager *man =
+		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	pgprot_t prot;
	int ret;
 
@@ -357,7 +365,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
	if (ret)
		return ret;
 
-	if (num_pages == 1 && ttm->caching == ttm_cached) {
+	if (num_pages == 1 && ttm->caching == ttm_cached &&
+	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
@@ -31,11 +31,14 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
+#include <linux/cc_platform.h>
 #include <linux/sched.h>
 #include <linux/shmem_fs.h>
 #include <linux/file.h>
 #include <linux/module.h>
 #include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_util.h>
 #include <drm/ttm/ttm_bo.h>
 #include <drm/ttm/ttm_tt.h>
 
@@ -60,6 +63,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
 int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 {
	struct ttm_device *bdev = bo->bdev;
+	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;
 
	dma_resv_assert_held(bo->base.resv);
@@ -81,6 +85,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
+	/*
+	 * When using dma_alloc_coherent with memory encryption the
+	 * mapped TT pages need to be decrypted or otherwise the drivers
+	 * will end up sending encrypted mem to the gpu.
+	 */
+	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+		page_flags |= TTM_TT_FLAG_DECRYPTED;
+		drm_info(ddev, "TT memory decryption enabled.");
+	}
 
	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))