Merge tag 'drm-next-2023-05-05' of git://anongit.freedesktop.org/drm/drm

Pull more drm fixes from Dave Airlie:
 "This is the fixes for the last couple of weeks for i915 and last 3
  weeks for amdgpu, lots of them but pretty scattered around and all
  pretty small.

  amdgpu:
   - SR-IOV fixes
   - DCN 3.2 fixes
   - DC mclk handling fixes
   - eDP fixes
   - SubVP fixes
   - HDCP regression fix
   - DSC fixes
   - DC FP fixes
   - DCN 3.x fixes
   - Display flickering fix when switching between VRAM and GTT
   - Z8 power saving fix
   - Fix hang when skipping modeset
   - GPU reset fixes
   - Doorbell fix when resizing BARs
   - Fix spurious warnings in gmc
   - Locking fix for AMDGPU_SCHED IOCTL
   - SR-IOV fix
   - DCN 3.1.4 fix
   - DCN 3.2 fix
   - Fix job cleanup when CS is aborted

  i915:
   - skl pipe source size check
   - mtl transcoder mask fix
   - DSI power on sequence fix
   - GuC versioning corner case fix"

* tag 'drm-next-2023-05-05' of git://anongit.freedesktop.org/drm/drm: (48 commits)
  drm/amdgpu: drop redundant sched job cleanup when cs is aborted
  drm/amd/display: filter out invalid bits in pipe_fuses
  drm/amd/display: Change default Z8 watermark values
  drm/amdgpu: disable SDMA WPTR_POLL_ENABLE for SR-IOV
  drm/amdgpu: add a missing lock for AMDGPU_SCHED
  drm/amdgpu: fix an amdgpu_irq_put() issue in gmc_v9_0_hw_fini()
  drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v10_0_hw_fini
  drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v11_0_hw_fini
  drm/amdgpu: Enable doorbell selfring after resize FB BAR
  drm/amdgpu: Use the default reset when loading or reloading the driver
  drm/amdgpu: Fix mode2 reset for sienna cichlid
  drm/i915/dsi: Use unconditional msleep() instead of intel_dsi_msleep()
  drm/i915/mtl: Add the missing CPU transcoder mask in intel_device_info
  drm/i915/guc: Actually return an error if GuC version range check fails
  drm/amd/display: Lowering min Z8 residency time
  drm/amd/display: fix flickering caused by S/G mode
  drm/amd/display: Set min_width and min_height capability for DCN30
  drm/amd/display: Isolate remaining FPU code in DCN32
  drm/amd/display: Update bounding box values for DCN321
  drm/amd/display: Do not clear GPINT register when releasing DMUB from reset
  ...
Committed by Linus Torvalds on 2023-05-05 11:49:22 -07:00
50 files changed, 413 insertions(+), 261 deletions(-)


@@ -1276,7 +1276,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
r = drm_sched_job_add_dependency(&leader->base, fence);
if (r) {
dma_fence_put(fence);
- goto error_cleanup;
+ return r;
}
}
@@ -1303,7 +1303,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
if (r) {
r = -EAGAIN;
- goto error_unlock;
+ mutex_unlock(&p->adev->notifier_lock);
+ return r;
}
p->fence = dma_fence_get(&leader->base.s_fence->finished);
@@ -1350,14 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
mutex_unlock(&p->adev->notifier_lock);
mutex_unlock(&p->bo_list->bo_list_mutex);
return 0;
- error_unlock:
- mutex_unlock(&p->adev->notifier_lock);
- error_cleanup:
- for (i = 0; i < p->gang_size; ++i)
- drm_sched_job_cleanup(&p->jobs[i]->base);
- return r;
}
/* Cleanup the parser structure */


@@ -2539,8 +2539,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_fru_get_product_info(adev);
init_failed:
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, true);
return r;
}
@@ -3580,6 +3578,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
int r, i;
bool px = false;
u32 max_MBps;
+ int tmp;
adev->shutdown = false;
adev->flags = flags;
@@ -3801,7 +3800,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
} else {
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+  * regardless of the module parameter reset_method.
+  */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@@ -3859,18 +3864,6 @@ fence_driver_init:
r = amdgpu_device_ip_init(adev);
if (r) {
- /* failed in exclusive mode due to timeout */
- if (amdgpu_sriov_vf(adev) &&
- !amdgpu_sriov_runtime(adev) &&
- amdgpu_virt_mmio_blocked(adev) &&
- !amdgpu_virt_wait_reset(adev)) {
- dev_err(adev->dev, "VF exclusive mode timeout\n");
- /* Don't send request since VF is inactive. */
- adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
- adev->virt.ops = NULL;
- r = -EAGAIN;
- goto release_ras_con;
- }
dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
goto release_ras_con;
@@ -3939,8 +3932,10 @@ fence_driver_init:
msecs_to_jiffies(AMDGPU_RESUME_MS));
}
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_release_full_gpu(adev, true);
+ flush_delayed_work(&adev->delayed_init_work);
+ }
r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (r)
@@ -3980,6 +3975,20 @@ fence_driver_init:
return 0;
release_ras_con:
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, true);
+ /* failed in exclusive mode due to timeout */
+ if (amdgpu_sriov_vf(adev) &&
+ !amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_mmio_blocked(adev) &&
+ !amdgpu_virt_wait_reset(adev)) {
+ dev_err(adev->dev, "VF exclusive mode timeout\n");
+ /* Don't send request since VF is inactive. */
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ adev->virt.ops = NULL;
+ r = -EAGAIN;
+ }
amdgpu_release_ras_context(adev);
failed:


@@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
@@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return r;
}
- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
+ mutex_unlock(&mgr->lock);
fdput(f);
return 0;


@@ -1143,7 +1143,6 @@ static int gmc_v10_0_hw_fini(void *handle)
return 0;
}
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;


@@ -951,7 +951,6 @@ static int gmc_v11_0_hw_fini(void *handle)
return 0;
}
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v11_0_gart_disable(adev);


@@ -1999,7 +1999,6 @@ static int gmc_v9_0_hw_fini(void *handle)
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, false);
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;


@@ -430,7 +430,7 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
MMSCH_COMMAND__END;
header.version = MMSCH_VERSION;
- header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
+ header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);
header.jpegdec.init_status = 0;
header.jpegdec.table_offset = 0;


@@ -531,13 +531,6 @@ static void nv_program_aspm(struct amdgpu_device *adev)
}
- static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
- {
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
- }
const struct amdgpu_ip_block_version nv_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -999,6 +992,11 @@ static int nv_common_late_init(void *handle)
}
}
+ /* Enable selfring doorbell aperture late because doorbell BAR
+  * aperture will change if resize BAR successfully in gmc sw_init.
+  */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
return 0;
}
@@ -1038,7 +1036,7 @@ static int nv_common_hw_init(void *handle)
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
- nv_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
return 0;
}
@@ -1047,8 +1045,13 @@ static int nv_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- nv_enable_doorbell_aperture(adev, false);
+ /* Disable the doorbell aperture and selfring doorbell aperture
+  * separately in hw_fini because nv_enable_doorbell_aperture
+  * has been removed and there is no need to delay disabling
+  * selfring doorbell.
+  */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
return 0;
}


@@ -510,10 +510,7 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- if (amdgpu_sriov_vf(adev))
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
- else
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);


@@ -40,7 +40,7 @@ static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_c
adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
return true;
#endif
- return false;
+ return amdgpu_reset_method == AMD_RESET_METHOD_MODE2;
}
static struct amdgpu_reset_handler *


@@ -619,13 +619,6 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
adev->nbio.funcs->program_aspm(adev);
}
- static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
- {
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
- }
const struct amdgpu_ip_block_version vega10_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -1125,6 +1118,11 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
+ /* Enable selfring doorbell aperture late because doorbell BAR
+  * aperture will change if resize BAR successfully in gmc sw_init.
+  */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
return 0;
}
@@ -1182,7 +1180,8 @@ static int soc15_common_hw_init(void *handle)
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
- soc15_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
/* HW doorbell routing policy: doorbell writing not
* in SDMA/IH/MM/ACV range will be routed to CP. So
* we need to init SDMA doorbell range prior
@@ -1198,8 +1197,14 @@ static int soc15_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- soc15_enable_doorbell_aperture(adev, false);
+ /* Disable the doorbell aperture and selfring doorbell aperture
+  * separately in hw_fini because soc15_enable_doorbell_aperture
+  * has been removed and there is no need to delay disabling
+  * selfring doorbell.
+  */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);


@@ -450,13 +450,6 @@ static void soc21_program_aspm(struct amdgpu_device *adev)
adev->nbio.funcs->program_aspm(adev);
}
- static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
- {
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
- }
const struct amdgpu_ip_block_version soc21_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -764,6 +757,11 @@ static int soc21_common_late_init(void *handle)
amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
}
+ /* Enable selfring doorbell aperture late because doorbell BAR
+  * aperture will change if resize BAR successfully in gmc sw_init.
+  */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
return 0;
}
@@ -797,7 +795,7 @@ static int soc21_common_hw_init(void *handle)
if (adev->nbio.funcs->remap_hdp_registers)
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
- soc21_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
return 0;
}
@@ -806,8 +804,13 @@ static int soc21_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- soc21_enable_doorbell_aperture(adev, false);
+ /* Disable the doorbell aperture and selfring doorbell aperture
+  * separately in hw_fini because soc21_enable_doorbell_aperture
+  * has been removed and there is no need to delay disabling
+  * selfring doorbell.
+  */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_put_irq(adev);


@@ -3128,9 +3128,12 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid);
}
- aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
- if (!aconnector->timing_requested)
- dm_error("%s: failed to create aconnector->requested_timing\n", __func__);
+ if (!aconnector->timing_requested) {
+ aconnector->timing_requested =
+ kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+ if (!aconnector->timing_requested)
+ dm_error("failed to create aconnector->requested_timing\n");
+ }
drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
@@ -7894,6 +7897,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
}
+ static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
+ {
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+ return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
+ }
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct drm_device *dev,
@@ -7968,6 +7978,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
continue;
dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
@@ -8034,11 +8046,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/*
* Only allow immediate flips for fast updates that don't
- * change FB pitch, DCC state, rotation or mirroing.
+ * change memory domain, FB pitch, DCC state, rotation or
+ * mirroring.
*/
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
- acrtc_state->update_type == UPDATE_TYPE_FAST;
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ get_mem_type(old_plane_state->fb) == get_mem_type(fb);
timestamp_ns = ktime_get_ns();
bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
@@ -8550,6 +8564,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ if (!adev->dm.hdcp_workqueue)
+ continue;
pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
if (!connector)
@@ -8598,6 +8615,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ if (!adev->dm.hdcp_workqueue)
+ continue;
new_crtc_state = NULL;
old_crtc_state = NULL;
@@ -9616,8 +9636,9 @@ static int dm_update_plane_state(struct dc *dc,
return -EINVAL;
}
- dc_plane_state_release(dm_old_plane_state->dc_state);
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
dm_new_plane_state->dc_state = NULL;
*lock_and_validation_needed = true;
@@ -10154,6 +10175,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
goto fail;
}


@@ -687,7 +687,6 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
return;
data[0] |= (1 << 1); // set bit 1 to 1
- return;
if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
return;


@@ -379,13 +379,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (aconnector->dc_sink && connector->state) {
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
- struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
- struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index];
- connector->state->hdcp_content_type =
- hdcp_w->hdcp_content_type[connector->index];
- connector->state->content_protection =
- hdcp_w->content_protection[connector->index];
+ if (adev->dm.hdcp_workqueue) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+ connector->state->hdcp_content_type =
+ hdcp_w->hdcp_content_type[connector->index];
+ connector->state->content_protection =
+ hdcp_w->content_protection[connector->index];
+ }
}
if (aconnector->dc_sink) {
@@ -1406,6 +1410,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
if (ret != 0) {
DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
goto clean_exit;
}


@@ -89,6 +89,7 @@ void dc_fpu_begin(const char *function_name, const int line)
if (*pcpu == 1) {
#if defined(CONFIG_X86)
+ migrate_disable();
kernel_fpu_begin();
#elif defined(CONFIG_PPC64)
if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
@@ -129,6 +130,7 @@ void dc_fpu_end(const char *function_name, const int line)
if (*pcpu <= 0) {
#if defined(CONFIG_X86)
kernel_fpu_end();
+ migrate_enable();
#elif defined(CONFIG_PPC64)
if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
disable_kernel_vsx();


@@ -878,6 +878,8 @@ void dcn32_clk_mgr_construct(
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
+ struct clk_log_info log_info = {0};
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn32_funcs;
if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
@@ -911,6 +913,7 @@ void dcn32_clk_mgr_construct(
clk_mgr->base.clks.ref_dtbclk_khz = 268750;
}
/* integer part is now VCO frequency in kHz */
clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);
@@ -918,6 +921,8 @@ void dcn32_clk_mgr_construct(
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */
+ dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
if (ctx->dc->debug.disable_dtb_ref_clk_switch &&
clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) {
clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk;


@@ -1707,6 +1707,9 @@ bool dc_remove_plane_from_context(
struct dc_stream_status *stream_status = NULL;
struct resource_pool *pool = dc->res_pool;
+ if (!plane_state)
+ return true;
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
stream_status = &context->stream_status[i];


@@ -1454,6 +1454,7 @@ struct dc_link {
struct ddc_service *ddc;
enum dp_panel_mode panel_mode;
bool aux_mode;
/* Private to DC core */


@@ -144,7 +144,7 @@ struct test_pattern {
unsigned int cust_pattern_size;
};
- #define SUBVP_DRR_MARGIN_US 600 // 600us for DRR margin (SubVP + DRR)
+ #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
enum mall_stream_type {
SUBVP_NONE, // subvp not in use

Some files were not shown because too many files have changed in this diff.