sync with OpenBSD -current

purplerain 2024-04-04 10:05:09 +00:00
parent d47112308d
commit 69f13bbae9
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
40 changed files with 422 additions and 153 deletions

View file

@@ -1,4 +1,4 @@
-/* $OpenBSD: localtime.c,v 1.65 2022/10/03 15:34:39 millert Exp $ */
+/* $OpenBSD: localtime.c,v 1.66 2024/04/04 02:20:01 millert Exp $ */
 /*
 ** This file is in the public domain, so clarified as of
 ** 1996-06-05 by Arthur David Olson.
@@ -189,7 +189,6 @@ static struct state *	gmtptr;
 #define TZ_STRLEN_MAX 255
 #endif /* !defined TZ_STRLEN_MAX */
 
-static char	lcl_TZname[TZ_STRLEN_MAX + 1];
 static int	lcl_is_set;
 static int	gmt_is_set;
 _THREAD_PRIVATE_MUTEX(lcl);
@@ -1147,9 +1146,11 @@ tzsetwall(void)
 static void
 tzset_basic(void)
 {
+	static char lcl_TZname[TZ_STRLEN_MAX + 1];
 	const char *	name;
 
-	if (issetugid() || (name = getenv("TZ")) == NULL) {
+	name = getenv("TZ");
+	if (name == NULL) {
 		tzsetwall_basic();
 		return;
 	}
@@ -1160,6 +1161,10 @@ tzset_basic(void)
 	if (lcl_is_set)
 		strlcpy(lcl_TZname, name, sizeof lcl_TZname);
 
+	/* Ignore TZ for setuid/setgid processes. */
+	if (issetugid())
+		name = TZDEFAULT;
+
 	if (lclptr == NULL) {
 		lclptr = calloc(1, sizeof *lclptr);
 		if (lclptr == NULL) {
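Note on the localtime.c change above: TZ is now consulted even in setuid/setgid processes, but its value is replaced with the built-in default instead of being trusted. A minimal standalone sketch of the new decision order (TZDEFAULT's expansion here is an assumption for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#ifndef TZDEFAULT
#define TZDEFAULT "/etc/localtime"	/* stands in for the libc default */
#endif

int
main(void)
{
	const char *name = getenv("TZ");

	if (name == NULL) {
		printf("no TZ: would fall back to the wall-clock path\n");
		return 0;
	}
	/* Ignore TZ for setuid/setgid processes. */
	if (issetugid())
		name = TZDEFAULT;
	printf("would load zone: %s\n", name);
	return 0;
}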

View file

@@ -1,4 +1,4 @@
-/* $OpenBSD: ssl_tlsext.c,v 1.147 2024/04/02 22:50:54 sthen Exp $ */
+/* $OpenBSD: ssl_tlsext.c,v 1.148 2024/04/04 08:02:21 tb Exp $ */
 /*
  * Copyright (c) 2016, 2017, 2019 Joel Sing <jsing@openbsd.org>
  * Copyright (c) 2017 Doug Hogan <doug@openbsd.org>
@@ -325,14 +325,17 @@ tlsext_supportedgroups_client_process(SSL *s, uint16_t msg_type, CBS *cbs,
     int *alert)
 {
 	/*
-	 * Servers should not send this extension per the RFC.
-	 *
-	 * However, certain F5 BIG-IP systems incorrectly send it. This bug is
-	 * from at least 2014 but as of 2017, there are still large sites with
-	 * this unpatched in production. As a result, we need to currently skip
-	 * over the extension and ignore its content:
-	 *
-	 * https://support.f5.com/csp/article/K37345003
+	 * This extension is only allowed in TLSv1.3 encrypted extensions.
+	 * It is not permitted in a ServerHello in any version of TLS.
+	 */
+	if (msg_type != SSL_TLSEXT_MSG_EE)
+		return 0;
+
+	/*
+	 * RFC 8446, section 4.2.7: TLSv1.3 servers can send this extension but
+	 * clients must not act on it during the handshake. This allows servers
+	 * to advertise their preferences for subsequent handshakes. We ignore
+	 * this complication.
 	 */
 	if (!CBS_skip(cbs, CBS_len(cbs))) {
 		*alert = SSL_AD_INTERNAL_ERROR;
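Note on the ssl_tlsext.c change: the handler now rejects the extension by message type up front, and only then skips the body that TLSv1.3 servers are allowed to send. A standalone C analogue of that gate (the cursor struct and message-type constants are invented for illustration; this is not the libssl CBS API):

#include <stddef.h>
#include <stdio.h>

enum { MSG_SERVER_HELLO, MSG_ENCRYPTED_EXTENSIONS };

struct cursor {
	const unsigned char *p;
	size_t len;
};

/* Returns 1 on success, 0 if the extension must be rejected. */
static int
process_supported_groups(int msg_type, struct cursor *c)
{
	/* Only legal in TLSv1.3 EncryptedExtensions, never in ServerHello. */
	if (msg_type != MSG_ENCRYPTED_EXTENSIONS)
		return 0;

	/* Clients must not act on the contents mid-handshake; skip them. */
	c->p += c->len;
	c->len = 0;
	return 1;
}

int
main(void)
{
	struct cursor c = { (const unsigned char *)"\x00\x02", 2 };

	printf("in ServerHello: %d\n",
	    process_supported_groups(MSG_SERVER_HELLO, &c));
	printf("in EncryptedExtensions: %d\n",
	    process_supported_groups(MSG_ENCRYPTED_EXTENSIONS, &c));
	return 0;
}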

View file

@@ -5,7 +5,7 @@ fib-update no
 neighbor 10.12.57.1 {
 	descr "RDOMAIN1"
 	remote-as 4200000001
-	tcp md5sig password password1
+	tcp md5sig key 70617373776f726431
 }
 
 deny from any
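The regression config moves from the plaintext password form to the raw key form; the hex string is simply "password1" encoded byte by byte. A quick standalone check:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *s = "password1";
	size_t i;

	for (i = 0; i < strlen(s); i++)
		printf("%02x", (unsigned char)s[i]);
	printf("\n");	/* prints 70617373776f726431 */
	return 0;
}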

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pmap.c,v 1.165 2023/12/29 13:23:27 jca Exp $ */ /* $OpenBSD: pmap.c,v 1.166 2024/04/03 18:43:32 miod Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */ /* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/* /*
@ -3105,13 +3105,6 @@ pmap_steal_memory(vsize_t size, vaddr_t *start, vaddr_t *end)
return (va); return (va);
} }
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{
*vstartp = virtual_avail;
*vendp = VM_MAX_KERNEL_ADDRESS;
}
/* /*
* pmap_convert * pmap_convert
* *

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pmap.c,v 1.222 2023/04/13 15:23:22 miod Exp $ */ /* $OpenBSD: pmap.c,v 1.223 2024/04/03 18:43:32 miod Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */ /* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/* /*
@ -1555,7 +1555,7 @@ pmap_extract_86(struct pmap *pmap, vaddr_t va, paddr_t *pap)
} }
/* /*
* pmap_virtual_space: used during bootup [pmap_steal_memory] to * pmap_virtual_space: used during bootup [uvm_pageboot_alloc] to
* determine the bounds of the kernel virtual address space. * determine the bounds of the kernel virtual address space.
*/ */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ncr53c9x.c,v 1.80 2022/04/16 19:19:59 naddy Exp $ */ /* $OpenBSD: ncr53c9x.c,v 1.81 2024/04/03 18:41:38 miod Exp $ */
/* $NetBSD: ncr53c9x.c,v 1.56 2000/11/30 14:41:46 thorpej Exp $ */ /* $NetBSD: ncr53c9x.c,v 1.56 2000/11/30 14:41:46 thorpej Exp $ */
/* /*
@ -596,13 +596,13 @@ ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
* always possible that the interrupt may never happen. * always possible that the interrupt may never happen.
*/ */
if ((ecb->xs->flags & SCSI_POLL) == 0) { if ((ecb->xs->flags & SCSI_POLL) == 0) {
int timeout = ecb->timeout; int timeout = ecb->xs->timeout;
if (timeout > 1000000) if (timeout > 1000000)
timeout = (timeout / 1000) * hz; timeout = (timeout / 1000) * hz;
else else
timeout = (timeout * hz) / 1000; timeout = (timeout * hz) / 1000;
timeout_add(&ecb->to, timeout); timeout_add(&ecb->xs->stimeout, timeout);
} }
/* /*
@ -741,7 +741,6 @@ ncr53c9x_get_ecb(void *null)
if (ecb == NULL) if (ecb == NULL)
return (NULL); return (NULL);
timeout_set(&ecb->to, ncr53c9x_timeout, ecb);
ecb->flags |= ECB_ALLOC; ecb->flags |= ECB_ALLOC;
return (ecb); return (ecb);
@ -842,7 +841,7 @@ ncr53c9x_scsi_cmd(struct scsi_xfer *xs)
/* Initialize ecb */ /* Initialize ecb */
ecb = xs->io; ecb = xs->io;
ecb->xs = xs; ecb->xs = xs;
ecb->timeout = xs->timeout; timeout_set(&xs->stimeout, ncr53c9x_timeout, ecb);
if (flags & SCSI_RESET) { if (flags & SCSI_RESET) {
ecb->flags |= ECB_RESET; ecb->flags |= ECB_RESET;
@ -869,9 +868,9 @@ ncr53c9x_scsi_cmd(struct scsi_xfer *xs)
return; return;
/* Not allowed to use interrupts, use polling instead */ /* Not allowed to use interrupts, use polling instead */
if (ncr53c9x_poll(sc, xs, ecb->timeout)) { if (ncr53c9x_poll(sc, xs, xs->timeout)) {
ncr53c9x_timeout(ecb); ncr53c9x_timeout(ecb);
if (ncr53c9x_poll(sc, xs, ecb->timeout)) if (ncr53c9x_poll(sc, xs, xs->timeout))
ncr53c9x_timeout(ecb); ncr53c9x_timeout(ecb);
} }
} }
@ -1070,7 +1069,7 @@ ncr53c9x_sense(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
ecb->daddr = (char *)&xs->sense; ecb->daddr = (char *)&xs->sense;
ecb->dleft = sizeof(struct scsi_sense_data); ecb->dleft = sizeof(struct scsi_sense_data);
ecb->flags |= ECB_SENSE; ecb->flags |= ECB_SENSE;
ecb->timeout = NCR_SENSE_TIMEOUT; xs->timeout = NCR_SENSE_TIMEOUT;
ti->senses++; ti->senses++;
li = TINFO_LUN(ti, lun); li = TINFO_LUN(ti, lun);
if (li->busy) li->busy = 0; if (li->busy) li->busy = 0;
@ -1101,7 +1100,7 @@ ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
NCR_TRACE(("[ncr53c9x_done(error:%x)] ", xs->error)); NCR_TRACE(("[ncr53c9x_done(error:%x)] ", xs->error));
timeout_del(&ecb->to); timeout_del(&ecb->xs->stimeout);
if (ecb->stat == SCSI_QUEUE_FULL) { if (ecb->stat == SCSI_QUEUE_FULL) {
/* /*
@ -2175,7 +2174,7 @@ again:
goto reset; goto reset;
} }
printf("sending REQUEST SENSE\n"); printf("sending REQUEST SENSE\n");
timeout_del(&ecb->to); timeout_del(&ecb->xs->stimeout);
ncr53c9x_sense(sc, ecb); ncr53c9x_sense(sc, ecb);
goto out; goto out;
} }
@ -2255,7 +2254,7 @@ printf("<<RESELECT CONT'd>>");
*/ */
if (sc->sc_state == NCR_SELECTING) { if (sc->sc_state == NCR_SELECTING) {
NCR_MISC(("backoff selector ")); NCR_MISC(("backoff selector "));
timeout_del(&ecb->to); timeout_del(&ecb->xs->stimeout);
ncr53c9x_dequeue(sc, ecb); ncr53c9x_dequeue(sc, ecb);
TAILQ_INSERT_HEAD(&sc->ready_list, ecb, chain); TAILQ_INSERT_HEAD(&sc->ready_list, ecb, chain);
ecb->flags |= ECB_READY; ecb->flags |= ECB_READY;
@ -2693,11 +2692,11 @@ ncr53c9x_abort(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
{ {
/* 2 secs for the abort */ /* 2 secs for the abort */
ecb->timeout = NCR_ABORT_TIMEOUT; ecb->xs->timeout = NCR_ABORT_TIMEOUT;
ecb->flags |= ECB_ABORT; ecb->flags |= ECB_ABORT;
if (ecb == sc->sc_nexus) { if (ecb == sc->sc_nexus) {
int timeout = ecb->timeout; int timeout = ecb->xs->timeout;
/* /*
* If we're still selecting, the message will be scheduled * If we're still selecting, the message will be scheduled
@ -2713,7 +2712,7 @@ ncr53c9x_abort(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
timeout = (timeout / 1000) * hz; timeout = (timeout / 1000) * hz;
else else
timeout = (timeout * hz) / 1000; timeout = (timeout * hz) / 1000;
timeout_add(&ecb->to, timeout); timeout_add(&ecb->xs->stimeout, timeout);
} else { } else {
/* /*
* Just leave the command where it is. * Just leave the command where it is.

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ncr53c9xvar.h,v 1.24 2020/07/22 13:16:04 krw Exp $ */ /* $OpenBSD: ncr53c9xvar.h,v 1.25 2024/04/03 18:41:38 miod Exp $ */
/* $NetBSD: ncr53c9xvar.h,v 1.13 1998/05/26 23:17:34 thorpej Exp $ */ /* $NetBSD: ncr53c9xvar.h,v 1.13 1998/05/26 23:17:34 thorpej Exp $ */
/*- /*-
@ -55,8 +55,6 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include <sys/timeout.h>
/* Set this to 1 for normal debug, or 2 for per-target tracing. */ /* Set this to 1 for normal debug, or 2 for per-target tracing. */
#if !defined(SMALL_KERNEL) #if !defined(SMALL_KERNEL)
#define NCR53C9X_DEBUG 1 #define NCR53C9X_DEBUG 1
@ -105,8 +103,6 @@ struct ncr53c9x_ecb {
#define ECB_ABORT 0x40 #define ECB_ABORT 0x40
#define ECB_RESET 0x80 #define ECB_RESET 0x80
#define ECB_TENTATIVE_DONE 0x100 #define ECB_TENTATIVE_DONE 0x100
int timeout;
struct timeout to;
struct { struct {
u_char msg[3]; /* Selection Id msg */ u_char msg[3]; /* Selection Id msg */

View file

@ -1,4 +1,4 @@
@@ -1,4 +1,4 @@
-/* $OpenBSD: ipmi.c,v 1.118 2022/04/08 13:13:14 mbuhl Exp $ */
+/* $OpenBSD: ipmi.c,v 1.119 2024/04/03 18:32:47 gkoehler Exp $ */
 
 /*
  * Copyright (c) 2015 Masao Uebayashi
@@ -1596,7 +1596,8 @@ ipmi_attach_common(struct ipmi_softc *sc, struct ipmi_attach_args *ia)
 	c->c_sc = sc;
 	c->c_ccode = -1;
 
-	sc->sc_cmd_taskq = taskq_create("ipmicmd", 1, IPL_NONE, TASKQ_MPSAFE);
+	sc->sc_cmd_taskq = taskq_create("ipmicmd", 1, IPL_MPFLOOR,
+	    TASKQ_MPSAFE);
 }
 
 int
int int

View file

@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
  */
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
 {
+	int r;
+
 	if (bo->kfd_bo)
-		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
 						    addr, amdgpu_bo_size(bo),
 						    &amdgpu_hmm_hsa_ops);
-	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
-					    amdgpu_bo_size(bo),
-					    &amdgpu_hmm_gfx_ops);
+	else
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+						 amdgpu_bo_size(bo),
+						 &amdgpu_hmm_gfx_ops);
+	if (r)
+		/*
+		 * Make sure amdgpu_hmm_unregister() doesn't call
+		 * mmu_interval_notifier_remove() when the notifier isn't properly
+		 * initialized.
+		 */
+		bo->notifier.mm = NULL;
+
+	return r;
 }
 
 /**
/** /**

View file

@@ -520,46 +520,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
 {
 	struct amdgpu_ring *ring = file_inode(f)->i_private;
 	volatile u32 *mqd;
-	int r;
+	u32 *kbuf;
+	int r, i;
 	uint32_t value, result;
 
 	if (*pos & 3 || size & 3)
 		return -EINVAL;
 
-	result = 0;
+	kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
 
 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
 	if (unlikely(r != 0))
-		return r;
+		goto err_free;
 
 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
-	if (r) {
-		amdgpu_bo_unreserve(ring->mqd_obj);
-		return r;
-	}
+	if (r)
+		goto err_unreserve;
 
+	/*
+	 * Copy to local buffer to avoid put_user(), which might fault
+	 * and acquire mmap_sem, under reservation_ww_class_mutex.
+	 */
+	for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
+		kbuf[i] = mqd[i];
+
+	amdgpu_bo_kunmap(ring->mqd_obj);
+	amdgpu_bo_unreserve(ring->mqd_obj);
+
+	result = 0;
 	while (size) {
 		if (*pos >= ring->mqd_size)
-			goto done;
+			break;
 
-		value = mqd[*pos/4];
+		value = kbuf[*pos/4];
 		r = put_user(value, (uint32_t *)buf);
 		if (r)
-			goto done;
+			goto err_free;
 		buf += 4;
 		result += 4;
 		size -= 4;
 		*pos += 4;
 	}
 
-done:
-	amdgpu_bo_kunmap(ring->mqd_obj);
-	mqd = NULL;
-	amdgpu_bo_unreserve(ring->mqd_obj);
-	if (r)
-		return r;
-
+	kfree(kbuf);
 	return result;
+
+err_unreserve:
+	amdgpu_bo_unreserve(ring->mqd_obj);
+err_free:
+	kfree(kbuf);
+	return r;
 }
 
 static const struct file_operations amdgpu_debugfs_mqd_fops = {
static const struct file_operations amdgpu_debugfs_mqd_fops = { static const struct file_operations amdgpu_debugfs_mqd_fops = {

View file

@@ -876,6 +876,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 				 gtt->ttm.dma_address, flags);
 	}
+	gtt->bound = true;
 }
 
 /*
/* /*

View file

@@ -1466,7 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
 
 static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
 {
-	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+	return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
 	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
 }
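Why the operator change matters: IP_VERSION() packs major/minor/revision into one integer, so the old strict comparison excluded GC 9.4.2 itself. The macro below mirrors the usual amdgpu shift layout and is reproduced as an assumption for illustration:

#include <stdio.h>

/* Mirrors the usual amdgpu packing; reproduced here as an assumption. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int
main(void)
{
	unsigned int gc = IP_VERSION(9, 4, 2);

	/* old check: strict ">" wrongly skipped GC 9.4.2 itself */
	printf("9.4.2 >  9.4.2: %d\n", gc > IP_VERSION(9, 4, 2));
	/* new check: ">=" includes it */
	printf("9.4.2 >= 9.4.2: %d\n", gc >= IP_VERSION(9, 4, 2));
	return 0;
}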

View file

@@ -6125,9 +6125,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
 
-	else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
-			stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
-			stream->signal == SIGNAL_TYPE_EDP) {
+	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
 		//
 		// should decide stream support vsc sdp colorimetry capability
 		// before building vsc info packet
@@ -6143,9 +6142,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
 			tf = TRANSFER_FUNC_GAMMA_22;
 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-
-		if (stream->link->psr_settings.psr_feature_enabled)
-			aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 	}
 finish:
 	dc_sink_release(sink);
@@ -10753,18 +10751,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	if (!adev->dm.freesync_module)
 		goto update;
 
-	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-	    || sink->sink_signal == SIGNAL_TYPE_EDP) {
+	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
 		bool edid_check_required = false;
 
-		if (edid) {
-			edid_check_required = is_dp_capable_without_timing_msa(
-						adev->dm.dc,
-						amdgpu_dm_connector);
+		if (is_dp_capable_without_timing_msa(adev->dm.dc,
+						     amdgpu_dm_connector)) {
+			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+				freesync_capable = true;
+				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+			} else {
+				edid_check_required = edid->version > 1 ||
+						      (edid->version == 1 &&
+						       edid->revision > 1);
+			}
 		}
 
-		if (edid_check_required == true && (edid->version > 1 ||
-		(edid->version == 1 && edid->revision > 1))) {
+		if (edid_check_required) {
 			for (i = 0; i < 4; i++) {
 
 				timing = &edid->detailed_timings[i];
@@ -10784,14 +10788,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 				if (range->flags != 1)
 					continue;
 
-				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
-				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
-				amdgpu_dm_connector->pixel_clock_mhz =
-					range->pixel_clock_mhz * 10;
-
 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
 
+				if (edid->revision >= 4) {
+					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+						connector->display_info.monitor_range.min_vfreq += 255;
+					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+						connector->display_info.monitor_range.max_vfreq += 255;
+				}
+
+				amdgpu_dm_connector->min_vfreq =
+					connector->display_info.monitor_range.min_vfreq;
+				amdgpu_dm_connector->max_vfreq =
+					connector->display_info.monitor_range.max_vfreq;
+				amdgpu_dm_connector->pixel_clock_mhz =
+					range->pixel_clock_mhz * 10;
+
 				break;
 			}

View file

@@ -619,10 +619,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
 	if (pipe_ctx == NULL)
 		return;
 
-	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
 		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
 				pipe_ctx->stream_res.stream_enc,
 				enable);
+
+		/* Wait for two frame to make sure AV mute is sent out */
+		if (enable) {
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+		}
+	}
 }
 
 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)

View file

@@ -142,6 +142,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
+	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+			OPTC_SEG0_SRC_SEL, 0xf,
+			OPTC_SEG1_SRC_SEL, 0xf,
+			OPTC_SEG2_SRC_SEL, 0xf,
+			OPTC_SEG3_SRC_SEL, 0xf,
+			OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+	REG_UPDATE(OPTC_MEMORY_CONFIG,
+			OPTC_MEM_SEL, 0);
+
 	/* disable otg request until end of the first line
 	 * in the vertical blank region
 	 */
@@ -174,6 +184,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
+	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+			OPTC_SEG0_SRC_SEL, 0xf,
+			OPTC_SEG1_SRC_SEL, 0xf,
+			OPTC_SEG2_SRC_SEL, 0xf,
+			OPTC_SEG3_SRC_SEL, 0xf,
+			OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
 	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
 }

View file

@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
 	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
 
 	if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)

View file

@@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 	}
 
 	/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
-	if (stream->link->psr_settings.psr_feature_enabled) {
-		if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
-			vsc_packet_revision = vsc_packet_rev4;
-		else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
-			vsc_packet_revision = vsc_packet_rev2;
-	}
-
-	if (stream->link->replay_settings.config.replay_supported)
+	if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
 		vsc_packet_revision = vsc_packet_rev4;
+	else if (stream->link->replay_settings.config.replay_supported)
+		vsc_packet_revision = vsc_packet_rev4;
+	else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+		vsc_packet_revision = vsc_packet_rev2;
 
 	/* Update to revision 5 for extended colorimetry support */
 	if (stream->use_vsc_sdp_for_colorimetry)

View file

@@ -2397,6 +2397,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	int err, ret;
+	u32 pwm_mode;
 	int value;
 
 	if (amdgpu_in_reset(adev))
@@ -2408,13 +2409,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	if (err)
 		return err;
 
+	if (value == 0)
+		pwm_mode = AMD_FAN_CTRL_NONE;
+	else if (value == 1)
+		pwm_mode = AMD_FAN_CTRL_MANUAL;
+	else if (value == 2)
+		pwm_mode = AMD_FAN_CTRL_AUTO;
+	else
+		return -EINVAL;
+
 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (ret < 0) {
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return ret;
 	}
 
-	ret = amdgpu_dpm_set_fan_control_mode(adev, value);
+	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 
 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
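The fix validates the sysfs value and translates it to a fan-control mode instead of forwarding the raw integer to amdgpu_dpm_set_fan_control_mode(). A standalone sketch of the same mapping (enum values invented here):

#include <errno.h>
#include <stdio.h>

enum fan_ctrl { FAN_CTRL_NONE, FAN_CTRL_MANUAL, FAN_CTRL_AUTO };

static int
pwm1_enable_to_mode(int value, enum fan_ctrl *mode)
{
	switch (value) {
	case 0: *mode = FAN_CTRL_NONE; return 0;
	case 1: *mode = FAN_CTRL_MANUAL; return 0;
	case 2: *mode = FAN_CTRL_AUTO; return 0;
	default: return -EINVAL;	/* reject instead of forwarding junk */
	}
}

int
main(void)
{
	enum fan_ctrl mode;
	int rc = pwm1_enable_to_mode(2, &mode);

	printf("value 2 -> rc %d, mode %d\n", rc, (int)mode);
	printf("value 7 -> rc %d\n", pwm1_enable_to_mode(7, &mode));
	return 0;
}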

View file

@@ -27,8 +27,9 @@
 #include <linux/mutex.h>
 
 #include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_debugfs.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_file.h>
 #include <drm/drm_of.h>
@@ -1213,6 +1214,47 @@ int drm_bridge_get_modes(struct drm_bridge *bridge,
 }
 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
 
+/**
+ * drm_bridge_edid_read - read the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
+ * the EDID and return it. Otherwise return NULL.
+ *
+ * If &drm_bridge_funcs.edid_read is not set, fall back to using
+ * drm_bridge_get_edid() and wrapping it in struct drm_edid.
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or NULL otherwise.
+ */
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+					    struct drm_connector *connector)
+{
+	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+		return NULL;
+
+	/* Transitional: Fall back to ->get_edid. */
+	if (!bridge->funcs->edid_read) {
+		const struct drm_edid *drm_edid;
+		struct edid *edid;
+
+		edid = drm_bridge_get_edid(bridge, connector);
+		if (!edid)
+			return NULL;
+
+		drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
+
+		kfree(edid);
+
+		return drm_edid;
+	}
+
+	return bridge->funcs->edid_read(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
+
 /**
  * drm_bridge_get_edid - get the EDID data of the connected display
  * @bridge: bridge control structure
@@ -1222,6 +1264,8 @@ EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
  * get the EDID and return it. Otherwise return NULL.
 *
+ * Deprecated. Prefer using drm_bridge_edid_read().
+ *
 * RETURNS:
 * The retrieved EDID on success, or NULL otherwise.
 */
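A sketch of how a connector driver might consume the new helper; this is illustrative glue under the assumption of the usual drm_edid helpers, not code from this commit:

/* Hypothetical connector .get_modes() built on the new helper. */
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct drm_bridge *bridge = example_lookup_bridge(connector); /* invented */
	const struct drm_edid *drm_edid;
	int count;

	drm_edid = drm_bridge_edid_read(bridge, connector);
	drm_edid_connector_update(connector, drm_edid); /* NULL clears EDID state */
	count = drm_edid_connector_add_modes(connector);
	drm_edid_free(drm_edid);

	return count;
}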

View file

@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
 * The modes probed from the panel are automatically added to the connector
 * that the panel is attached to.
 *
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
 */
 int drm_panel_get_modes(struct drm_panel *panel,
 			struct drm_connector *connector)
 {
 	if (!panel)
-		return -EINVAL;
+		return 0;
 
-	if (panel->funcs && panel->funcs->get_modes)
-		return panel->funcs->get_modes(panel, connector);
+	if (panel->funcs && panel->funcs->get_modes) {
+		int num;
 
-	return -EOPNOTSUPP;
+		num = panel->funcs->get_modes(panel, connector);
+		if (num > 0)
+			return num;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_panel_get_modes);
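drm_panel_get_modes() now reports 0 instead of a negative errno, matching the .get_modes() contract documented later in this commit. A tiny standalone illustration of clamping such callback results:

#include <stdio.h>

/* Hypothetical callback type mirroring a .get_modes()-style hook. */
typedef int (*get_modes_fn)(void *ctx);

/* Normalize: a mode count is >= 0; anything else means "no modes". */
static int
get_modes_clamped(get_modes_fn fn, void *ctx)
{
	int num;

	if (fn == NULL)
		return 0;
	num = fn(ctx);
	return num > 0 ? num : 0;
}

static int broken_cb(void *ctx) { (void)ctx; return -95; /* an errno */ }

int
main(void)
{
	printf("broken callback -> %d modes\n", get_modes_clamped(broken_cb, NULL));
	printf("missing callback -> %d modes\n", get_modes_clamped(NULL, NULL));
	return 0;
}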

View file

@@ -419,6 +419,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
 
 	count = connector_funcs->get_modes(connector);
 
+	/* The .get_modes() callback should not return negative values. */
+	if (count < 0) {
+		drm_err(connector->dev, ".get_modes() returned %pe\n",
+			ERR_PTR(count));
+		count = 0;
+	}
+
 	/*
 	 * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
 	 * override/firmware EDID.

View file

@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 	}
 
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
-	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
 	/* ensure all panel commands dispatched before enabling transcoder */
 	wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
 	/* step6d: enable dsi transcoder */
 	gen11_dsi_enable_transcoder(encoder);
 
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
 	/* step7: enable backlight */
 	intel_backlight_enable(crtc_state, conn_state);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);

View file

@@ -1945,16 +1945,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
 * these devices we split the init OTP sequence into a deassert sequence and
 * the actual init OTP part.
 */
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
-				 struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+				     struct intel_panel *panel)
 {
 	u8 *init_otp;
 	int len;
 
-	/* Limit this to VLV for now. */
-	if (!IS_VALLEYVIEW(i915))
-		return;
-
 	/* Limit this to v1 vid-mode sequences */
 	if (panel->vbt.dsi.config->is_cmd_mode ||
 	    panel->vbt.dsi.seq_version != 1)
@@ -1990,6 +1986,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
 	panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
 }
 
+/*
+ * Some machines (eg. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ *   (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ *       or examine the contents of the sequences to
+ *       avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+				     struct intel_panel *panel)
+{
+	if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+	    panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+		drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+		swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+		     panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+	}
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+				 struct intel_panel *panel)
+{
+	if (DISPLAY_VER(i915) >= 11)
+		icl_fixup_mipi_sequences(i915, panel);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_fixup_mipi_sequences(i915, panel);
+}
+
 static void
 parse_mipi_sequence(struct drm_i915_private *i915,
 		    struct intel_panel *panel)
@@ -3330,6 +3361,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
 {
 	const struct child_device_config *child = &devdata->child;
 
+	if (!devdata)
+		return false;
+
 	if (!intel_bios_encoder_supports_dp(devdata) ||
 	    !intel_bios_encoder_supports_hdmi(devdata))
 		return false;

View file

@@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
 
-	return intel_port_to_phy(i915, dig_port->base.port);
+	/*
+	 * FIXME should we care about the (VBT defined) dig_port->aux_ch
+	 * relationship or should this be purely defined by the hardware layout?
+	 * Currently if the port doesn't appear in the VBT, or if it's declared
+	 * as HDMI-only and routed to a combo PHY, the encoder either won't be
+	 * present at all or it will not have an aux_ch assigned.
+	 */
+	return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
 }
 
 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 
 	intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
 
-	if (DISPLAY_VER(dev_priv) < 12)
+	/* FIXME this is a mess */
+	if (phy != PHY_NONE)
 		intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
 			     0, ICL_LANE_ENABLE_AUX);
 
@@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 
 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 
-	intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
+	/* FIXME this is a mess */
+	if (phy != PHY_NONE)
+		intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+			     ICL_LANE_ENABLE_AUX, 0);
 
 	intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);

View file

@@ -2462,7 +2462,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
 static bool
 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
 {
-	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+	return ((IS_ELKHARTLAKE(i915) &&
 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
 		IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
 		i915->display.dpll.ref_clks.nssc == 38400;

View file

@@ -379,6 +379,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
 	GEM_WARN_ON(obj->userptr.page_ref);
 
+	if (!obj->userptr.notifier.mm)
+		return;
+
 	mmu_interval_notifier_remove(&obj->userptr.notifier);
 	obj->userptr.notifier.mm = NULL;
 }

View file

@@ -278,9 +278,6 @@ static int __engine_park(struct intel_wakeref *wf)
 	intel_engine_park_heartbeat(engine);
 	intel_breadcrumbs_park(engine->breadcrumbs);
 
-	/* Must be reset upon idling, or we may miss the busy wakeup. */
-	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
 	if (engine->park)
 		engine->park(engine);

View file

@@ -3279,6 +3279,9 @@ static void execlists_park(struct intel_engine_cs *engine)
 {
 	cancel_timer(&engine->execlists.timer);
 	cancel_timer(&engine->execlists.preempt);
+
+	/* Reset upon idling, or we may delay the busy wakeup. */
+	WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
 }
 
 static void add_to_engine(struct i915_request *rq)

View file

@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
 	struct intel_uncore *uncore = ddat->uncore;
 	intel_wakeref_t wakeref;
 
-	mutex_lock(&hwmon->hwmon_lock);
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
+		mutex_lock(&hwmon->hwmon_lock);
 
-	with_intel_runtime_pm(uncore->rpm, wakeref)
 		intel_uncore_rmw(uncore, reg, clear, set);
 
-	mutex_unlock(&hwmon->hwmon_lock);
+		mutex_unlock(&hwmon->hwmon_lock);
+	}
 }
 
 /*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
 	else
 		rgaddr = hwmon->rg.energy_status_all;
 
-	mutex_lock(&hwmon->hwmon_lock);
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
+		mutex_lock(&hwmon->hwmon_lock);
 
-	with_intel_runtime_pm(uncore->rpm, wakeref)
 		reg_val = intel_uncore_read(uncore, rgaddr);
 
-	if (reg_val >= ei->reg_val_prev)
-		ei->accum_energy += reg_val - ei->reg_val_prev;
-	else
-		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
-	ei->reg_val_prev = reg_val;
+		if (reg_val >= ei->reg_val_prev)
+			ei->accum_energy += reg_val - ei->reg_val_prev;
+		else
+			ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+		ei->reg_val_prev = reg_val;
 
-	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
-				  hwmon->scl_shift_energy);
-	mutex_unlock(&hwmon->hwmon_lock);
+		*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+					  hwmon->scl_shift_energy);
+		mutex_unlock(&hwmon->hwmon_lock);
+	}
 }
 
 static ssize_t
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
 
 	/* Block waiting for GuC reset to complete when needed */
 	for (;;) {
+		wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
 		mutex_lock(&hwmon->hwmon_lock);
 
 		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
 		}
 
 		mutex_unlock(&hwmon->hwmon_lock);
+		intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
 
 		schedule();
 	}
 	finish_wait(&ddat->waitq, &wait);
 	if (ret)
-		goto unlock;
-
-	wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+		goto exit;
 
 	/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
 	if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
 	intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
 			 PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
 exit:
-	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
 	mutex_unlock(&hwmon->hwmon_lock);
+	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
 	return ret;
 }
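The hwmon rework establishes one lock order everywhere: take the runtime-PM wakeref (which may sleep to resume the device) before hwmon_lock, and release in reverse. A userspace analogue of that ordering rule (names invented):

#include <pthread.h>
#include <stdio.h>

/*
 * "Wake the device" (slow, may sleep) always happens before taking the
 * short-lived data lock, so the lock is never held across the slow step.
 */
static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void device_get(void) { /* stands in for intel_runtime_pm_get() */ }
static void device_put(void) { /* stands in for intel_runtime_pm_put() */ }

static void
update(void)
{
	device_get();			/* outer: may block/resume hardware */
	pthread_mutex_lock(&data_lock);	/* inner: short critical section */
	counter++;
	pthread_mutex_unlock(&data_lock);
	device_put();
}

int
main(void)
{
	update();
	printf("counter = %d\n", counter);
	return 0;
}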

View file

@@ -7,7 +7,8 @@
 #include <machine/pte.h>
 #include <linux/types.h>
 
-#define pgprot_val(v)	(v)
+#define pgprot_val(p)	(p)
+#define pgprot_decrypted(p)	(p)
 
 #define PAGE_KERNEL	0
 #define PAGE_KERNEL_IO	0

View file

@@ -555,6 +555,37 @@ struct drm_bridge_funcs {
 	int (*get_modes)(struct drm_bridge *bridge,
 			 struct drm_connector *connector);
 
+	/**
+	 * @edid_read:
+	 *
+	 * Read the EDID data of the connected display.
+	 *
+	 * The @edid_read callback is the preferred way of reporting mode
+	 * information for a display connected to the bridge output. Bridges
+	 * that support reading EDID shall implement this callback and leave
+	 * the @get_modes callback unimplemented.
+	 *
+	 * The caller of this operation shall first verify the output
+	 * connection status and refrain from reading EDID from a disconnected
+	 * output.
+	 *
+	 * This callback is optional. Bridges that implement it shall set the
+	 * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
+	 *
+	 * The connector parameter shall be used for the sole purpose of EDID
+	 * retrieval, and shall not be stored internally by bridge drivers for
+	 * future usage.
+	 *
+	 * RETURNS:
+	 *
+	 * An edid structure newly allocated with drm_edid_alloc() or returned
+	 * from drm_edid_read() family of functions on success, or NULL
+	 * otherwise. The caller is responsible for freeing the returned edid
+	 * structure with drm_edid_free().
+	 */
+	const struct drm_edid *(*edid_read)(struct drm_bridge *bridge,
+					    struct drm_connector *connector);
+
 	/**
 	 * @get_edid:
 	 *
@@ -888,6 +919,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
 enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
 int drm_bridge_get_modes(struct drm_bridge *bridge,
 			 struct drm_connector *connector);
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+					    struct drm_connector *connector);
 struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
 				 struct drm_connector *connector);
 void drm_bridge_hpd_enable(struct drm_bridge *bridge,

View file

@@ -898,7 +898,8 @@ struct drm_connector_helper_funcs {
 	 *
 	 * RETURNS:
 	 *
-	 * The number of modes added by calling drm_mode_probed_add().
+	 * The number of modes added by calling drm_mode_probed_add(). Return 0
+	 * on failures (no modes) instead of negative error codes.
 	 */
 	int (*get_modes)(struct drm_connector *connector);

View file

@@ -81,6 +81,12 @@ struct ttm_tt {
 	 *   page_flags = TTM_TT_FLAG_EXTERNAL |
 	 *		  TTM_TT_FLAG_EXTERNAL_MAPPABLE;
 	 *
+	 * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as
+	 * not encrypted. The framework will try to match what the dma layer
+	 * is doing, but note that it is a little fragile because ttm page
+	 * fault handling abuses the DMA api a bit and dma_map_attrs can't be
+	 * used to assure pgprot always matches.
+	 *
 	 * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
 	 * set by TTM after ttm_tt_populate() has successfully returned, and is
 	 * then unset when TTM calls ttm_tt_unpopulate().
@@ -89,8 +95,9 @@ struct ttm_tt {
 #define TTM_TT_FLAG_ZERO_ALLOC		BIT(1)
 #define TTM_TT_FLAG_EXTERNAL		BIT(2)
 #define TTM_TT_FLAG_EXTERNAL_MAPPABLE	BIT(3)
+#define TTM_TT_FLAG_DECRYPTED		BIT(4)
 
-#define TTM_TT_FLAG_PRIV_POPULATED	BIT(4)
+#define TTM_TT_FLAG_PRIV_POPULATED	BIT(5)
 	uint32_t page_flags;
 	/** @num_pages: Number of pages in the page array. */
 	uint32_t num_pages;

View file

@@ -0,0 +1,16 @@
+/* Public domain. */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+
+#define CC_ATTR_GUEST_MEM_ENCRYPT	0
+
+static inline bool
+cc_platform_has(int x)
+{
+	return false;
+}
+
+#endif

View file

@@ -296,7 +296,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
 	enum ttm_caching caching;
 
 	man = ttm_manager_type(bo->bdev, res->mem_type);
-	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+	if (man->use_tt) {
+		caching = bo->ttm->caching;
+		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+			tmp = pgprot_decrypted(tmp);
+	} else {
+		caching = res->bus.caching;
+	}
 
 	return ttm_prot_from_caching(caching, tmp);
 }
@@ -348,6 +354,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		.no_wait_gpu = false
 	};
 	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_resource_manager *man =
+		ttm_manager_type(bo->bdev, bo->resource->mem_type);
 	pgprot_t prot;
 	int ret;
 
@@ -357,7 +365,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	if (num_pages == 1 && ttm->caching == ttm_cached) {
+	if (num_pages == 1 && ttm->caching == ttm_cached &&
+	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
 		/*
 		 * We're mapping a single page, and the desired
 		 * page protection is consistent with the bo.

View file

@@ -31,11 +31,14 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
+#include <linux/cc_platform.h>
 #include <linux/sched.h>
 #include <linux/shmem_fs.h>
 #include <linux/file.h>
 #include <linux/module.h>
 #include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_util.h>
 #include <drm/ttm/ttm_bo.h>
 #include <drm/ttm/ttm_tt.h>
 
@@ -60,6 +63,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
 int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_device *bdev = bo->bdev;
+	struct drm_device *ddev = bo->base.dev;
 	uint32_t page_flags = 0;
 
 	dma_resv_assert_held(bo->base.resv);
@@ -81,6 +85,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 		pr_err("Illegal buffer object type\n");
 		return -EINVAL;
 	}
+	/*
+	 * When using dma_alloc_coherent with memory encryption the
+	 * mapped TT pages need to be decrypted or otherwise the drivers
+	 * will end up sending encrypted mem to the gpu.
+	 */
+	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+		page_flags |= TTM_TT_FLAG_DECRYPTED;
+		drm_info(ddev, "TT memory decryption enabled.");
+	}
 
 	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
 	if (unlikely(bo->ttm == NULL))

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uvm_mmap.c,v 1.187 2024/04/02 08:39:17 deraadt Exp $ */ /* $OpenBSD: uvm_mmap.c,v 1.188 2024/04/03 22:21:48 kettenis Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */ /* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/* /*
@ -1244,7 +1244,6 @@ sys_kbind(struct proc *p, void *v, register_t *retval)
last_baseva = VM_MAXUSER_ADDRESS; last_baseva = VM_MAXUSER_ADDRESS;
kva = 0; kva = 0;
TAILQ_INIT(&dead_entries); TAILQ_INIT(&dead_entries);
KERNEL_LOCK();
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
baseva = (vaddr_t)paramp[i].kb_addr; baseva = (vaddr_t)paramp[i].kb_addr;
s = paramp[i].kb_size; s = paramp[i].kb_size;
@ -1295,7 +1294,6 @@ redo:
vm_map_unlock(kernel_map); vm_map_unlock(kernel_map);
} }
uvm_unmap_detach(&dead_entries, AMAP_REFALL); uvm_unmap_detach(&dead_entries, AMAP_REFALL);
KERNEL_UNLOCK();
return error; return error;
} }

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uvm_pmap.h,v 1.33 2023/04/13 15:23:23 miod Exp $ */ /* $OpenBSD: uvm_pmap.h,v 1.34 2024/04/03 18:43:32 miod Exp $ */
/* $NetBSD: uvm_pmap.h,v 1.1 2000/06/27 09:00:14 mrg Exp $ */ /* $NetBSD: uvm_pmap.h,v 1.1 2000/06/27 09:00:14 mrg Exp $ */
/* /*
@ -173,9 +173,10 @@ void pmap_update(pmap_t);
void pmap_zero_page(struct vm_page *); void pmap_zero_page(struct vm_page *);
#endif #endif
void pmap_virtual_space(vaddr_t *, vaddr_t *);
#if defined(PMAP_STEAL_MEMORY) #if defined(PMAP_STEAL_MEMORY)
vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *); vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#else
void pmap_virtual_space(vaddr_t *, vaddr_t *);
#endif #endif
/* nested pmaps are used in i386/amd64 vmm */ /* nested pmaps are used in i386/amd64 vmm */
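With this header change the two interfaces become mutually exclusive: a pmap either steals boot memory itself or advertises a kernel VA range, which is why the pmap.c diff earlier in this commit can drop pmap_virtual_space() entirely. A simplified rendering (not the actual uvm source) of how a boot-time allocator would choose:

/* Sketch of the consumer-side choice; function name is invented. */
vaddr_t
pageboot_alloc_sketch(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	/* the pmap carves the pages out of the free list itself */
	return pmap_steal_memory(size, NULL, NULL);
#else
	/* generic path: take VA from the range the pmap advertises */
	vaddr_t vstart, vend;

	pmap_virtual_space(&vstart, &vend);
	/* ... map pages at vstart, advance it, etc. ... */
	return vstart;
#endif
}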

View file

@ -1,4 +1,4 @@
/* $OpenBSD: parse.y,v 1.457 2024/03/20 09:35:46 claudio Exp $ */ /* $OpenBSD: parse.y,v 1.458 2024/04/03 08:57:26 claudio Exp $ */
/* /*
* Copyright (c) 2002, 2003, 2004 Henning Brauer <henning@openbsd.org> * Copyright (c) 2002, 2003, 2004 Henning Brauer <henning@openbsd.org>
@ -4985,11 +4985,23 @@ expand_rule(struct filter_rule *rule, struct filter_rib_l *rib,
return (0); return (0);
} }
static int
h2i(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
else if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
else if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
else
return -1;
}
int int
str2key(char *s, char *dest, size_t max_len) str2key(char *s, char *dest, size_t max_len)
{ {
unsigned int i; size_t i;
char t[3];
if (strlen(s) / 2 > max_len) { if (strlen(s) / 2 > max_len) {
yyerror("key too long"); yyerror("key too long");
@ -5002,15 +5014,15 @@ str2key(char *s, char *dest, size_t max_len)
} }
for (i = 0; i < strlen(s) / 2; i++) { for (i = 0; i < strlen(s) / 2; i++) {
t[0] = s[2*i]; int hi, lo;
t[1] = s[2*i + 1];
t[2] = 0; hi = h2i(s[2 * i]);
if (!isxdigit((unsigned char)t[0]) || lo = h2i(s[2 * i + 1]);
!isxdigit((unsigned char)t[1])) { if (hi == -1 || lo == -1) {
yyerror("key must be specified in hex"); yyerror("key must be specified in hex");
return (-1); return (-1);
} }
dest[i] = strtoul(t, NULL, 16); dest[i] = (hi << 4) | lo;
} }
return (0); return (0);
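The new parser converts nibbles directly instead of round-tripping through isxdigit()/strtoul(). A standalone check that also decodes the key used in the bgpd regression config above (the driver is test scaffolding, not part of parse.y):

#include <stdio.h>
#include <string.h>

static int
h2i(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	else if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	else if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	else
		return -1;
}

int
main(void)
{
	const char *s = "70617373776f726431";
	char dest[32];
	size_t i, n = strlen(s) / 2;

	for (i = 0; i < n; i++) {
		int hi = h2i(s[2 * i]), lo = h2i(s[2 * i + 1]);

		if (hi == -1 || lo == -1)
			return 1;	/* key must be specified in hex */
		dest[i] = (hi << 4) | lo;
	}
	dest[n] = '\0';
	printf("%s\n", dest);	/* prints "password1" */
	return 0;
}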

View file

@@ -473,7 +473,7 @@ vm_agentx_operstate(int mask)
 static void
 vm_agentx_vmHvSoftware(struct agentx_varbind *vb)
 {
-	agentx_varbind_string(vb, "OpenBSD VMM");
+	agentx_varbind_string(vb, "SecBSD VMM");
 }
 
 static void