Merge tag 'drm-xe-next-fixes-2024-01-16' of https://gitlab.freedesktop.org/drm/xe...
author Dave Airlie <airlied@redhat.com>
Fri, 19 Jan 2024 06:13:44 +0000 (16:13 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 19 Jan 2024 06:13:51 +0000 (16:13 +1000)
Driver Changes:
- Fix for definition of wakeref_t
- Fix for an error code aliasing issue
- Fix for VM_UNBIND_ALL in the case there are no bound VMAs
- Fixes for a number of __iomem address space mismatches reported by sparse (see the sketch after this list)
- Fixes for the assignment of exec_queue priority
- A fix for skip_guc_pc not taking effect
- Workaround for a build problem on GCC 11
- A couple of fixes for error paths
- Fix a Flat CCS compression metadata copy issue
- Fix a misplaced array bounds check
- Don't have display support depend on EXPERT (as discussed on IRC)
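
As context for the sparse item above: sparse tracks the __iomem address
space on pointers, and mixing plain pointers with __iomem ones produces
the mismatch warnings this pull fixes. A minimal sketch of the pattern,
with every name invented for illustration (none are taken from the xe
driver):

    #include <linux/io.h>
    #include <linux/types.h>

    struct demo_dev {
            void __iomem *regs;     /* MMIO mapping keeps the __iomem tag */
    };

    static u32 demo_read_status(struct demo_dev *d)
    {
            /*
             * Dereferencing through a plain pointer, e.g.
             *      u32 *p = (u32 *)(d->regs + 0x10);
             * triggers sparse's "different address spaces" warning;
             * keeping the annotation and using readl() does not.
             */
            return readl(d->regs + 0x10);
    }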

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240116102204.106520-1-thomas.hellstrom@linux.intel.com
256 files changed:
Documentation/gpu/amdgpu/apu-asic-info-table.csv
drivers/gpu/drm/amd/amdgpu/aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
drivers/gpu/drm/amd/display/dc/Makefile
drivers/gpu/drm/amd/display/dc/basics/conversion.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_state.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dc_plane.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dc_plane_priv.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dc_state.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dc_state_priv.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_stream_priv.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
drivers/gpu/drm/amd/display/dc/dcn10/Makefile
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
drivers/gpu/drm/amd/display/dc/dcn201/Makefile
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
drivers/gpu/drm/amd/display/dc/dcn30/Makefile
drivers/gpu/drm/amd/display/dc/dcn301/Makefile
drivers/gpu/drm/amd/display/dc/dcn302/Makefile [deleted file]
drivers/gpu/drm/amd/display/dc/dcn31/Makefile
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dcn314/Makefile
drivers/gpu/drm/amd/display/dc/dcn32/Makefile
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dcn35/Makefile
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
drivers/gpu/drm/amd/display/dc/hwss/Makefile
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c [moved from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c with 99% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h [moved from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
drivers/gpu/drm/amd/display/dc/inc/link.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
drivers/gpu/drm/amd/display/dc/link/link_factory.c
drivers/gpu/drm/amd/display/dc/link/link_validation.c
drivers/gpu/drm/amd/display/dc/link/link_validation.h
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
drivers/gpu/drm/amd/display/include/audio_types.h
drivers/gpu/drm/amd/display/include/fixed31_32.h
drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/i915/display/intel_cx0_phy.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gt/intel_gsc.h
drivers/gpu/drm/i915/gt/intel_gt_regs.h
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_hwmon.c
drivers/gpu/drm/i915/i915_perf_types.h
drivers/gpu/drm/imagination/pvr_device.h
drivers/gpu/drm/nouveau/nouveau_vmm.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
drivers/gpu/drm/v3d/v3d_debugfs.c
include/drm/drm_gpuvm.h
include/uapi/drm/i915_drm.h

index 2e76b427ba1ee5f4806a3487ff929c4f252b4695..18868abe2a913f7fe21b5e64ac53d02a88841897 100644 (file)
@@ -7,6 +7,7 @@ SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0
 Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1
 Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3
 Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
-Ryzen 7x45 series (FL1), Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
+Ryzen 7x45 series (FL1), Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
 Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
-Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
\ No newline at end of file
+Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
+Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
index 02f4c6f9d4f6876e3edc0fa7de1f38a8a969986c..576067d66bb9af69fd9c3f3c80994f7aa00a319e 100644 (file)
@@ -330,6 +330,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 {
        struct list_head *reset_device_list = reset_context->reset_device_list;
        struct amdgpu_device *tmp_adev = NULL;
+       struct amdgpu_ras *con;
        int r;
 
        if (reset_device_list == NULL)
@@ -355,7 +356,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
                 */
                amdgpu_register_gpu_instance(tmp_adev);
 
-               /* Resume RAS */
+               /* Resume RAS, ecc_irq */
+               con = amdgpu_ras_get_context(tmp_adev);
+               if (!amdgpu_sriov_vf(tmp_adev) && con) {
+                       if (tmp_adev->sdma.ras &&
+                               tmp_adev->sdma.ras->ras_block.ras_late_init) {
+                               r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
+                                               &tmp_adev->sdma.ras->ras_block.ras_comm);
+                               if (r) {
+                                       dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
+                                       goto end;
+                               }
+                       }
+
+                       if (tmp_adev->gfx.ras &&
+                               tmp_adev->gfx.ras->ras_block.ras_late_init) {
+                               r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
+                                               &tmp_adev->gfx.ras->ras_block.ras_comm);
+                               if (r) {
+                                       dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
+                                       goto end;
+                               }
+                       }
+               }
+
                amdgpu_ras_resume(tmp_adev);
 
                /* Update PSP FW topology after reset */
index 616b6c91176796b6f72be410c5e8541e8416492e..3d8a48f46b015613dc44517ebd20d5250df5a3b1 100644 (file)
@@ -1144,6 +1144,7 @@ struct amdgpu_device {
        bool                            debug_vm;
        bool                            debug_largebar;
        bool                            debug_disable_soft_recovery;
+       bool                            debug_use_vram_fw_buf;
 };
 
 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
index 067690ba7bffd4192817fe3acf9d46a19f1f274c..77e2636602887034c188ec695591d20e5b087b60 100644 (file)
@@ -138,6 +138,9 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
 
+static const struct drm_client_funcs kfd_client_funcs = {
+       .unregister     = drm_client_release,
+};
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
        int i;
@@ -161,7 +164,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                        .enable_mes = adev->enable_mes,
                };
 
-               ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", NULL);
+               ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", &kfd_client_funcs);
                if (ret) {
                        dev_err(adev->dev, "Failed to init DRM client: %d\n", ret);
                        return;
@@ -695,10 +698,8 @@ err:
 void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
 {
        enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
-       /* Temporary workaround to fix issues observed in some
-        * compute applications when GFXOFF is enabled on GFX11.
-        */
-       if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) {
+       if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+           ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
                pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
                amdgpu_gfx_off_ctrl(adev, idle);
        } else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
index cf6ed5fce291f946854d329fa91e0fb6eedbc61a..f262b9d89541a8a971a394b5f0da0f6a1368ba65 100644 (file)
@@ -311,7 +311,7 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);
 
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
-                                           struct dma_fence **ef);
+                                           struct dma_fence __rcu **ef);
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
                                              struct kfd_vm_fault_info *info);
 int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
index 469785d337911a093cba2620cfd6e7fd77d207bd..1ef758ac5076ef9a56ba5191ac38964897564912 100644 (file)
@@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
                return NULL;
 
        fence = container_of(f, struct amdgpu_amdkfd_fence, base);
-       if (fence && f->ops == &amdkfd_fence_ops)
+       if (f->ops == &amdkfd_fence_ops)
                return fence;
 
        return NULL;
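
An aside on the hunk above: container_of() is plain pointer arithmetic,
so for a non-NULL member pointer it can never yield NULL, which is why
the removed "fence &&" test was dead code. A standalone sketch of the
same mechanics, with invented types:

    #include <stddef.h>
    #include <stdio.h>

    struct base { const void *ops; };
    struct wrapper {
            int extra;
            struct base b;          /* embedded member, not at offset 0 */
    };

    /* Open-coded equivalent of container_of(bp, struct wrapper, b). */
    static struct wrapper *to_wrapper(struct base *bp)
    {
            return (struct wrapper *)((char *)bp - offsetof(struct wrapper, b));
    }

    int main(void)
    {
            struct wrapper w = { .extra = 7 };

            /* Recovering the wrapper from its member is non-NULL whenever
             * the member pointer itself is non-NULL. */
            printf("%d\n", to_wrapper(&w.b)->extra);
            return 0;
    }
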
index d17b2452cb1f69df276dd95518cf0ca340539237..f183d7faeeece16cfc7c211f5a6a0232dce37c36 100644 (file)
@@ -2802,7 +2802,7 @@ unlock_out:
        put_task_struct(usertask);
 }
 
-static void replace_eviction_fence(struct dma_fence **ef,
+static void replace_eviction_fence(struct dma_fence __rcu **ef,
                                   struct dma_fence *new_ef)
 {
        struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
                        /* protected by process_info->lock */);
@@ -2837,7 +2837,7 @@ static void replace_eviction_fence(struct dma_fence **ef,
  * 7.  Add fence to all PD and PT BOs.
  * 8.  Unreserve all BOs
  */
-int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
 {
        struct amdkfd_process_info *process_info = info;
        struct amdgpu_vm *peer_vm;
index 96f63fd39b9e1c3922488dd6527dff78e2f35799..9caba10315a8137dc7ed03efc8b60250d6e0feaa 100644 (file)
@@ -1103,7 +1103,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
                         * DDC line.  The latter is more complex because with DVI<->HDMI adapters
                         * you don't really know what's connected to which port as both are digital.
                         */
-                        amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
+                       amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
                }
        }
 
index 440e9a6786fcc91e6db89e7b1f179f298b79a4f6..6adeddfb3d5643bebe7366094ce8f4ba00dcdfb2 100644 (file)
@@ -870,9 +870,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                struct amdgpu_bo *bo = e->bo;
                int i;
 
-               e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
-                                       sizeof(struct page *),
-                                       GFP_KERNEL | __GFP_ZERO);
+               e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
+                                        sizeof(struct page *),
+                                        GFP_KERNEL);
                if (!e->user_pages) {
                        DRM_ERROR("kvmalloc_array failure\n");
                        r = -ENOMEM;
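
For background on the hunk above: kvcalloc(n, size, flags) is the
overflow-checked, zero-initializing shorthand for
kvmalloc_array(n, size, flags | __GFP_ZERO), so the replacement is
behavior-preserving and simply states the intent directly. A minimal
kernel-style sketch, with the function name invented:

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Returns num_pages zeroed page-pointer slots, or NULL on failure
     * (including overflow of the num_pages * size multiplication). */
    static struct page **demo_alloc_page_array(unsigned long num_pages)
    {
            return kvcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
    }
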
index 2cebf2145d9a6e726357a89924963cab461d740e..e485dd3357c63fd225b3fb7e3847675749f018da 100644 (file)
@@ -540,7 +540,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
        while (size) {
                uint32_t value;
 
-               value = RREG32_PCIE(*pos);
+               if (upper_32_bits(*pos))
+                       value = RREG32_PCIE_EXT(*pos);
+               else
+                       value = RREG32_PCIE(*pos);
+
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        goto out;
@@ -600,7 +604,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
                if (r)
                        goto out;
 
-               WREG32_PCIE(*pos, value);
+               if (upper_32_bits(*pos))
+                       WREG32_PCIE_EXT(*pos, value);
+               else
+                       WREG32_PCIE(*pos, value);
 
                result += 4;
                buf += 4;
index 85ed0d66a0298b46fb157b3e36e4db59f9b553a9..b158d27d0a71cbbafb55f0d58657c1ec178fa6c2 100644 (file)
@@ -1544,6 +1544,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
                                return true;
 
                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+                       release_firmware(adev->pm.fw);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
@@ -2251,15 +2252,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 
        adev->firmware.gpu_info_fw = NULL;
 
-       if (adev->mman.discovery_bin) {
-               /*
-                * FIXME: The bounding box is still needed by Navi12, so
-                * temporarily read it from gpu_info firmware. Should be dropped
-                * when DAL no longer needs it.
-                */
-               if (adev->asic_type != CHIP_NAVI12)
-                       return 0;
-       }
+       if (adev->mman.discovery_bin)
+               return 0;
 
        switch (adev->asic_type) {
        default:
@@ -5252,7 +5246,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
        struct amdgpu_device *tmp_adev = NULL;
        bool need_full_reset, skip_hw_reset, vram_lost = false;
        int r = 0;
-       bool gpu_reset_for_dev_remove = 0;
 
        /* Try reset handler method first */
        tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -5272,10 +5265,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
        skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
 
-       gpu_reset_for_dev_remove =
-               test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
-                       test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
-
        /*
         * ASIC reset has to be done on all XGMI hive nodes ASAP
         * to allow proper links negotiation in FW (within 1 sec)
@@ -5318,18 +5307,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                amdgpu_ras_intr_cleared();
        }
 
-       /* Since the mode1 reset affects base ip blocks, the
-        * phase1 ip blocks need to be resumed. Otherwise there
-        * will be a BIOS signature error and the psp bootloader
-        * can't load kdb on the next amdgpu install.
-        */
-       if (gpu_reset_for_dev_remove) {
-               list_for_each_entry(tmp_adev, device_list_handle, reset_list)
-                       amdgpu_device_ip_resume_phase1(tmp_adev);
-
-               goto end;
-       }
-
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                if (need_full_reset) {
                        /* post card */
@@ -5566,11 +5543,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        int i, r = 0;
        bool need_emergency_restart = false;
        bool audio_suspended = false;
-       bool gpu_reset_for_dev_remove = false;
-
-       gpu_reset_for_dev_remove =
-                       test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
-                               test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 
        /*
         * Special case: RAS triggered and full reset isn't supported
@@ -5608,7 +5580,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
                list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                        list_add_tail(&tmp_adev->reset_list, &device_list);
-                       if (gpu_reset_for_dev_remove && adev->shutdown)
+                       if (adev->shutdown)
                                tmp_adev->shutdown = true;
                }
                if (!list_is_first(&adev->reset_list, &device_list))
@@ -5693,10 +5665,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 retry: /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-               if (gpu_reset_for_dev_remove) {
-                       /* Workaroud for ASICs need to disable SMC first */
-                       amdgpu_device_smu_fini_early(tmp_adev);
-               }
                r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
                /*TODO Should we stop ?*/
                if (r) {
@@ -5728,9 +5696,6 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                r = amdgpu_do_asic_reset(device_list_handle, reset_context);
                if (r && r == -EAGAIN)
                        goto retry;
-
-               if (!r && gpu_reset_for_dev_remove)
-                       goto recover_end;
        }
 
 skip_hw_reset:
@@ -5786,7 +5751,6 @@ skip_sched_resume:
                amdgpu_ras_set_error_query_ready(tmp_adev, true);
        }
 
-recover_end:
        tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
                                            reset_list);
        amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
index 0431eafa86b5324f4d63cc6060cea30baa03088b..c7d60dd0fb975d47d749300c79f976da15892736 100644 (file)
@@ -1963,8 +1963,6 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                break;
        case IP_VERSION(9, 4, 3):
-               if (!amdgpu_exp_hw_support)
-                       return -EINVAL;
                amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
                break;
        case IP_VERSION(10, 1, 10):
index 880137774b4eb2649559e03cceba5ca53fd70c8d..cc69005f5b46e7b9f06d65db13287a617cc384e2 100644 (file)
@@ -128,6 +128,7 @@ enum AMDGPU_DEBUG_MASK {
        AMDGPU_DEBUG_VM = BIT(0),
        AMDGPU_DEBUG_LARGEBAR = BIT(1),
        AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+       AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
 };
 
 unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -2117,6 +2118,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
                pr_info("debug: soft reset for GPU recovery disabled\n");
                adev->debug_disable_soft_recovery = true;
        }
+
+       if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+               pr_info("debug: place fw in vram for frontdoor loading\n");
+               adev->debug_use_vram_fw_buf = true;
+       }
 }
 
 static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2228,6 +2234,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
        pci_set_drvdata(pdev, ddev);
 
+       amdgpu_init_debug_options(adev);
+
        ret = amdgpu_driver_load_kms(adev, flags);
        if (ret)
                goto err_pci;
@@ -2308,8 +2316,6 @@ retry_init:
                        amdgpu_get_secondary_funcs(adev);
        }
 
-       amdgpu_init_debug_options(adev);
-
        return 0;
 
 err_pci:
@@ -2331,38 +2337,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
                pm_runtime_forbid(dev->dev);
        }
 
-       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
-           !amdgpu_sriov_vf(adev)) {
-               bool need_to_reset_gpu = false;
-
-               if (adev->gmc.xgmi.num_physical_nodes > 1) {
-                       struct amdgpu_hive_info *hive;
-
-                       hive = amdgpu_get_xgmi_hive(adev);
-                       if (hive->device_remove_count == 0)
-                               need_to_reset_gpu = true;
-                       hive->device_remove_count++;
-                       amdgpu_put_xgmi_hive(hive);
-               } else {
-                       need_to_reset_gpu = true;
-               }
-
-               /* Workaround for ASICs need to reset SMU.
-                * Called only when the first device is removed.
-                */
-               if (need_to_reset_gpu) {
-                       struct amdgpu_reset_context reset_context;
-
-                       adev->shutdown = true;
-                       memset(&reset_context, 0, sizeof(reset_context));
-                       reset_context.method = AMD_RESET_METHOD_NONE;
-                       reset_context.reset_req_dev = adev;
-                       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-                       set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
-                       amdgpu_device_gpu_recover(adev, NULL, &reset_context);
-               }
-       }
-
        amdgpu_driver_unload_kms(dev);
 
        /*
index d2f273d77e59557ba5185cbfa36e243788d3d86e..55784a9f26c4c83b17008a766130c234df8ecbaf 100644 (file)
@@ -1045,21 +1045,28 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
         * seconds, so here, we just pick up three parts for emulation.
         */
        ret = memcmp(vram_ptr, cptr, 10);
-       if (ret)
-               return ret;
+       if (ret) {
+               ret = -EIO;
+               goto release_buffer;
+       }
 
        ret = memcmp(vram_ptr + (size / 2), cptr, 10);
-       if (ret)
-               return ret;
+       if (ret) {
+               ret = -EIO;
+               goto release_buffer;
+       }
 
        ret = memcmp(vram_ptr + size - 10, cptr, 10);
-       if (ret)
-               return ret;
+       if (ret) {
+               ret = -EIO;
+               goto release_buffer;
+       }
 
+release_buffer:
        amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
                        &vram_ptr);
 
-       return 0;
+       return ret;
 }
 
 static ssize_t current_memory_partition_show(
index b5ebafd4a3adf82e37b29f9df84cbf6541955441..bf4f48fe438d1b5936852145c8b4c1059446381c 100644 (file)
@@ -1105,7 +1105,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_GPU_AVG_POWER,
                                                   (void *)&ui32, &ui32_size)) {
-                               return -EINVAL;
+                               /* fall back to input power for backwards compat */
+                               if (amdgpu_dpm_read_sensor(adev,
+                                                          AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+                                                          (void *)&ui32, &ui32_size)) {
+                                       return -EINVAL;
+                               }
                        }
                        ui32 >>= 8;
                        break;
index 210aea590a52ed7bc87c98776e5fd5107e0e7aea..59fafb8392e0bae775e721e166aac800dfbdc98c 100644 (file)
@@ -218,6 +218,7 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, st
 int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
 {
        struct amdgpu_smuio_mcm_config_info mcm_info;
+       struct ras_err_addr err_addr = {0};
        struct mca_bank_set mca_set;
        struct mca_bank_node *node;
        struct mca_bank_entry *entry;
@@ -246,10 +247,18 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
                mcm_info.socket_id = entry->info.socket_id;
                mcm_info.die_id = entry->info.aid;
 
+               if (blk == AMDGPU_RAS_BLOCK__UMC) {
+                       err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
+                       err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
+                       err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
+               }
+
                if (type == AMDGPU_MCA_ERROR_TYPE_UE)
-                       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count);
+                       amdgpu_ras_error_statistic_ue_count(err_data,
+                               &mcm_info, &err_addr, (uint64_t)count);
                else
-                       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count);
+                       amdgpu_ras_error_statistic_ce_count(err_data,
+                               &mcm_info, &err_addr, (uint64_t)count);
        }
 
 out_mca_release:
@@ -351,6 +360,9 @@ int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_err
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
        int count;
 
+       if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
+               return -EOPNOTSUPP;
+
        switch (type) {
        case AMDGPU_MCA_ERROR_TYPE_UE:
                count = mca_funcs->max_ue_count;
@@ -365,10 +377,7 @@ int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_err
        if (idx >= count)
                return -EINVAL;
 
-       if (mca_funcs && mca_funcs->mca_get_mca_entry)
-               return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
-
-       return -EOPNOTSUPP;
+       return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
 }
 
 #if defined(CONFIG_DEBUG_FS)
index 1bf975b8d083edb68a3790ac74c76b66d430bd00..0328616473f80af861cd4a1176afc0221eee7db9 100644 (file)
@@ -466,7 +466,7 @@ static int psp_sw_init(void *handle)
        }
 
        ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
-                                     amdgpu_sriov_vf(adev) ?
+                                     (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
                                      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
                                      &psp->fw_pri_bo,
                                      &psp->fw_pri_mc_addr,
index bacb59d8b701a6813b795ed67d4d8f98be60a784..31823a30dea217b5af3a8a36624a01fab70b48a5 100644 (file)
@@ -305,11 +305,13 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                        return -EINVAL;
 
                data->head.block = block_id;
-               /* only ue and ce errors are supported */
+               /* only ue, ce and poison errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+               else if (!memcmp("poison", err, 6))
+                       data->head.type = AMDGPU_RAS_ERROR__POISON;
                else
                        return -EINVAL;
 
@@ -431,9 +433,10 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
  * The block is one of: umc, sdma, gfx, etc.
  *     see ras_block_string[] for details
  *
- * The error type is one of: ue, ce, where,
+ * The error type is one of: ue, ce and poison where,
  *     ue is multi-uncorrectable
  *     ce is single-correctable
+ *     poison is poison
  *
  * The sub-block is a the sub-block index, pass 0 if there is no sub-block.
  * The address and value are hexadecimal numbers, leading 0x is optional.
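
The doc-comment in the hunk above defines the ras_ctrl grammar, and with
this change "poison" becomes a third accepted error type alongside "ue"
and "ce". A hedged userspace sketch of one injection write; the debugfs
mount point and the card index 0 in the path are assumptions about the
target system:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Per the grammar above: op block error sub_block address value. */
            static const char cmd[] = "inject umc poison 0 0x0 0x0";
            int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, cmd, strlen(cmd)) < 0) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }
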
@@ -1067,8 +1070,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                        mcm_info = &err_info->mcm_info;
                        if (err_info->ce_count) {
                                dev_info(adev->dev, "socket: %d, die: %d, "
-                                        "%lld new correctable hardware errors detected in %s block, "
-                                        "no user action is needed\n",
+                                        "%lld new correctable hardware errors detected in %s block\n",
                                         mcm_info->socket_id,
                                         mcm_info->die_id,
                                         err_info->ce_count,
@@ -1080,8 +1082,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        dev_info(adev->dev, "socket: %d, die: %d, "
-                                "%lld correctable hardware errors detected in total in %s block, "
-                                "no user action is needed\n",
+                                "%lld correctable hardware errors detected in total in %s block\n",
                                 mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
                }
        }
@@ -1108,16 +1109,14 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
                           adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                 "%ld correctable hardware errors "
-                                "detected in %s block, no user "
-                                "action is needed.\n",
+                                "detected in %s block\n",
                                 adev->smuio.funcs->get_socket_id(adev),
                                 adev->smuio.funcs->get_die_id(adev),
                                 ras_mgr->err_data.ce_count,
                                 blk_name);
                } else {
                        dev_info(adev->dev, "%ld correctable hardware errors "
-                                "detected in %s block, no user "
-                                "action is needed.\n",
+                                "detected in %s block\n",
                                 ras_mgr->err_data.ce_count,
                                 blk_name);
                }
@@ -1156,8 +1155,10 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
 
-                       amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count);
-                       amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count);
+                       amdgpu_ras_error_statistic_ce_count(&obj->err_data,
+                                       &err_info->mcm_info, NULL, err_info->ce_count);
+                       amdgpu_ras_error_statistic_ue_count(&obj->err_data,
+                                       &err_info->mcm_info, NULL, err_info->ue_count);
                }
        } else {
                /* for legacy asic path which doesn't has error source info */
@@ -1174,6 +1175,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
        enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
        struct amdgpu_ras_block_object *block_obj = NULL;
 
+       if (blk == AMDGPU_RAS_BLOCK_COUNT)
+               return -EINVAL;
+
        if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
                return -EINVAL;
 
@@ -1915,7 +1919,7 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
                                struct amdgpu_iv_entry *entry)
 {
        dev_info(obj->adev->dev,
-               "Poison is created, no user action is needed.\n");
+               "Poison is created\n");
 }
 
 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
@@ -2538,7 +2542,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
                return 0;
 
        data = &con->eh_data;
-       *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+       *data = kzalloc(sizeof(**data), GFP_KERNEL);
        if (!*data) {
                ret = -ENOMEM;
                goto out;
@@ -2825,10 +2829,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        if (con)
                return 0;
 
-       con = kmalloc(sizeof(struct amdgpu_ras) +
+       con = kzalloc(sizeof(*con) +
                        sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
                        sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
-                       GFP_KERNEL|__GFP_ZERO);
+                       GFP_KERNEL);
        if (!con)
                return -ENOMEM;
 
@@ -2915,6 +2919,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 
        amdgpu_ras_query_poison_mode(adev);
 
+       /* Packed socket_id to ras feature mask bits[31:29] */
+       if (adev->smuio.funcs &&
+           adev->smuio.funcs->get_socket_id)
+               con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+
        /* Get RAS schema for particular SOC */
        con->schema = amdgpu_get_ras_schema(adev);
 
@@ -3133,8 +3142,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       /* enable MCA debug on APU device */
-       amdgpu_ras_set_mca_debug_mode(adev, !!(adev->flags & AMD_IS_APU));
+       amdgpu_ras_set_mca_debug_mode(adev, false);
 
        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
@@ -3691,7 +3699,8 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct
 }
 
 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
-                                                     struct amdgpu_smuio_mcm_config_info *mcm_info)
+                               struct amdgpu_smuio_mcm_config_info *mcm_info,
+                               struct ras_err_addr *err_addr)
 {
        struct ras_err_node *err_node;
 
@@ -3705,6 +3714,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
 
        memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
 
+       if (err_addr)
+               memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));
+
        err_data->err_list_count++;
        list_add_tail(&err_node->node, &err_data->err_node_list);
        list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
@@ -3713,7 +3725,8 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
 }
 
 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
-                                       struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
+               struct amdgpu_smuio_mcm_config_info *mcm_info,
+               struct ras_err_addr *err_addr, u64 count)
 {
        struct ras_err_info *err_info;
 
@@ -3723,7 +3736,7 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
        if (!count)
                return 0;
 
-       err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+       err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
        if (!err_info)
                return -EINVAL;
 
@@ -3734,7 +3747,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
 }
 
 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
-                                       struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
+               struct amdgpu_smuio_mcm_config_info *mcm_info,
+               struct ras_err_addr *err_addr, u64 count)
 {
        struct ras_err_info *err_info;
 
@@ -3744,7 +3758,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
        if (!count)
                return 0;
 
-       err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+       err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
        if (!err_info)
                return -EINVAL;
 
index 6a941eb8fb8fd77e948e1bc170309e4a1418f2a2..76fb85628716f6302b3c02beb0965c85f2723a05 100644 (file)
@@ -452,10 +452,17 @@ struct ras_fs_data {
        char debugfs_name[32];
 };
 
+struct ras_err_addr {
+       uint64_t err_status;
+       uint64_t err_ipid;
+       uint64_t err_addr;
+};
+
 struct ras_err_info {
        struct amdgpu_smuio_mcm_config_info mcm_info;
        u64 ce_count;
        u64 ue_count;
+       struct ras_err_addr err_addr;
 };
 
 struct ras_err_node {
@@ -806,8 +813,10 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
 int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
-                                       struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
+               struct amdgpu_smuio_mcm_config_info *mcm_info,
+               struct ras_err_addr *err_addr, u64 count);
 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
-                                       struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
+               struct amdgpu_smuio_mcm_config_info *mcm_info,
+               struct ras_err_addr *err_addr, u64 count);
 
 #endif
index b0335a1c5e90cb8f000fe1989bfb20dfbbd53c58..19899f6b9b2b419a0fdf2ed84c71f0278963f511 100644 (file)
@@ -32,7 +32,6 @@ enum AMDGPU_RESET_FLAGS {
 
        AMDGPU_NEED_FULL_RESET = 0,
        AMDGPU_SKIP_HW_RESET = 1,
-       AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
 };
 
 struct amdgpu_reset_context {
index 35e0ae9acadcd616a056c73dd96960c3f8937569..2c3675d91614f13b21e35a08d57c5fc7bca32090 100644 (file)
@@ -531,13 +531,12 @@ int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
        if (version_major == 2 && version_minor == 1)
                adev->gfx.rlc.is_rlc_v2_1 = true;
 
-       if (version_minor >= 0) {
-               err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
-               if (err) {
-                       dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
-                       return err;
-               }
+       err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+       if (err) {
+               dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+               return err;
        }
+
        if (version_minor >= 1)
                amdgpu_gfx_rlc_init_microcode_v2_1(adev);
        if (version_minor >= 2)
index dcd8c066bc1f5024814853a9f9689b3c8eef4132..1b013a44ca99af7a51bee203b7f91c738b3500df 100644 (file)
@@ -191,7 +191,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
 
        /* Never sync to VM updates either. */
        if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
-           owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+           owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+           owner != AMDGPU_FENCE_OWNER_KFD)
                return false;
 
        /* Ignore fences depending on the sync mode */
index b14127429f3036e060afde459d8627734afd5a60..3e12763e477aa45724d0c16a1b514a5a299a76a9 100644 (file)
@@ -1062,7 +1062,8 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
 {
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
                amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
-                       amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+                       (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
+                       AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
                        &adev->firmware.fw_buf,
                        &adev->firmware.fw_buf_mc,
                        &adev->firmware.fw_buf_ptr);
@@ -1397,9 +1398,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
 
        if (err)
                return -ENODEV;
+
        err = amdgpu_ucode_validate(*fw);
-       if (err)
+       if (err) {
                dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+               release_firmware(*fw);
+               *fw = NULL;
+       }
 
        return err;
 }
index 7da71b6a9dc6a2341911da0c807f2f34e9fc9a31..b8fcb6c55698934549c6696a337e749f0e268217 100644 (file)
@@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
        list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = vm_bo->bo;
 
+               vm_bo->moved = true;
                if (!bo || bo->tbo.type != ttm_bo_type_kernel)
                        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
                else if (bo->parent)
index b6cd565562ad8d9a99270757fc2b37352600d2f3..4740dd65b99d6ccc107e5d63aba0f0d67d02d718 100644 (file)
@@ -116,7 +116,7 @@ struct amdgpu_mem_stats;
 #define AMDGPU_VM_FAULT_STOP_FIRST     1
 #define AMDGPU_VM_FAULT_STOP_ALWAYS    2
 
-/* Reserve 4MB VRAM for page tables */
+/* How much VRAM be reserved for page tables */
 #define AMDGPU_VM_RESERVED_VRAM                (8ULL << 20)
 
 /*
index 6f149b54d4d3970c5fe0a8255f8f7a080433381a..b9a15d51eb5c30e554d4e4f7c1397e3ce51996d9 100644 (file)
@@ -59,11 +59,8 @@ static inline uint16_t complete_integer_division_u16(
 
 static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
 {
-       bool arg1_negative = numerator < 0;
-       bool arg2_negative = denominator < 0;
-
-       uint16_t arg1_value = (uint16_t)(arg1_negative ? -numerator : numerator);
-       uint16_t arg2_value = (uint16_t)(arg2_negative ? -denominator : denominator);
+       u16 arg1_value = numerator;
+       u16 arg2_value = denominator;
 
        uint16_t remainder;
 
@@ -100,9 +97,6 @@ static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
                res_value += summand;
        }
 
-       if (arg1_negative ^ arg2_negative)
-               res_value = -res_value;
-
        return res_value;
 }
 
index 9a95b9f226b85a5c711cd4f21e28dc36021611dd..a6c88f2fe6e5750ea42d153dcfb855046d1a5b25 100644 (file)
@@ -1313,10 +1313,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
 
        switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
        case AMDGPU_MCA_ERROR_TYPE_UE:
-               amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
+               amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
                break;
        case AMDGPU_MCA_ERROR_TYPE_CE:
-               amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
+               amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
                break;
        default:
                break;
index 6cab882e8061e80f33bca5eb1a7b59d8cf0a687f..1592c63b3099b982d0b9bdda596919da8ec14f5f 100644 (file)
@@ -43,7 +43,6 @@ struct amdgpu_hive_info {
        } pstate;
 
        struct amdgpu_reset_domain *reset_domain;
-       uint32_t device_remove_count;
        atomic_t ras_recovery;
 };
 
index f0737fb3a999e03a44eb3c08f6e0099e3326c929..d1bba9c64e16d808fbaafd4e01d8764cb77b1a86 100644 (file)
@@ -30,6 +30,8 @@
 
 #define regATHUB_MISC_CNTL_V3_0_1                      0x00d7
 #define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX             0
+#define regATHUB_MISC_CNTL_V3_3_0                      0x00d8
+#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX             0
 
 
 static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
@@ -40,6 +42,9 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
        case IP_VERSION(3, 0, 1):
                data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
                break;
+       case IP_VERSION(3, 3, 0):
+               data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0);
+               break;
        default:
                data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
                break;
@@ -53,6 +58,9 @@ static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
        case IP_VERSION(3, 0, 1):
                WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
                break;
+       case IP_VERSION(3, 3, 0):
+               WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data);
+               break;
        default:
                WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
                break;
index 2c221000782cdffa5c57aeb2b464db0836895f8c..a33e890c70d904a2be4f0f33cf9a194e053b70be 100644 (file)
@@ -395,7 +395,6 @@ static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
                        (*ptr)++;
                        return;
                }
-               return;
        }
 }
 
index 73f6d7e72c737537f17264746b061a936b4960e5..d63cab294883b8b44caa908d5bafaeaf19750ef6 100644 (file)
@@ -3996,16 +3996,13 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 
        if (!amdgpu_sriov_vf(adev)) {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
-               err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
-               /* don't check this.  There are apparently firmwares in the wild with
-                * incorrect size in the header
-                */
-               if (err == -ENODEV)
-                       goto out;
+               err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
                if (err)
-                       dev_dbg(adev->dev,
-                               "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
-                               fw_name);
+                       goto out;
+
+               /* don't validate this firmware. There are apparently firmwares
+                * in the wild with incorrect size in the header
+                */
                rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
                version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
                version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
index bdcf96df69e6b28ade574a1107f76c6267e0bc9b..0ea0866c261f84e24e8494755387b3d22482a0a2 100644 (file)
@@ -115,7 +115,7 @@ static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
        SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x8000b007),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007),
        SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
        SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
@@ -4474,11 +4474,43 @@ static int gfx_v11_0_wait_for_idle(void *handle)
        return -ETIMEDOUT;
 }
 
+static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+                                            int req)
+{
+       u32 i, tmp, val;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               /* Request with MeId=2, PipeId=0 */
+               tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
+               tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
+               WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
+
+               val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
+               if (req) {
+                       if (val == tmp)
+                               break;
+               } else {
+                       tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
+                                           REQUEST, 1);
+
+                       /* unlocked or locked by firmware */
+                       if (val != tmp)
+                               break;
+               }
+               udelay(1);
+       }
+
+       if (i >= adev->usec_timeout)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int gfx_v11_0_soft_reset(void *handle)
 {
        u32 grbm_soft_reset = 0;
        u32 tmp;
-       int i, j, k;
+       int r, i, j, k;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
@@ -4518,6 +4550,13 @@ static int gfx_v11_0_soft_reset(void *handle)
                }
        }
 
+       /* Try to acquire the gfx mutex before access to CP_VMID_RESET */
+       r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
+       if (r) {
+               DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
+               return r;
+       }
+
        WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
 
        // Read CP_VMID_RESET register three times.
@@ -4526,6 +4565,13 @@ static int gfx_v11_0_soft_reset(void *handle)
        RREG32_SOC15(GC, 0, regCP_VMID_RESET);
        RREG32_SOC15(GC, 0, regCP_VMID_RESET);
 
+       /* release the gfx mutex */
+       r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
+       if (r) {
+               DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
+               return r;
+       }
+
        for (i = 0; i < adev->usec_timeout; i++) {
                if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
                    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
@@ -6337,6 +6383,9 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       bitmap = i * adev->gfx.config.max_sh_per_se + j;
+                       if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
+                               continue;
                        mask = 1;
                        counter = 0;
                        gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
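
The gfx_v11_0_request_gfx_index_mutex() helper added above is a register-backed mutex: write a request word, read the register back, and on acquire treat an exact echo as ownership; on release, treat any read-back other than "this client still holds it" as success, retrying with udelay() up to adev->usec_timeout. A loose userspace analog of that poll loop (the register is a plain variable here, and the REQUEST/CLIENTID bit packing is invented for the sketch):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t mutex_reg;      /* stand-in for the CP_GFX_INDEX_MUTEX register */

/* req = 1 acquires, req = 0 releases; returns 0 on success, -1 on timeout. */
static int request_index_mutex(int req, uint32_t client_id)
{
        /* invented packing: bit 0 = REQUEST, bits 1+ = CLIENTID */
        uint32_t tmp = (client_id << 1) | (req & 1);
        int i;

        for (i = 0; i < 1000; i++) {
                mutex_reg = tmp;                /* WREG32 */
                uint32_t val = mutex_reg;       /* RREG32 */

                if (req) {
                        if (val == tmp)         /* our request echoed: owned */
                                return 0;
                } else {
                        /* released unless read-back still shows us as holder */
                        if (val != ((client_id << 1) | 1))
                                return 0;
                }
                usleep(1);                      /* udelay(1) in the driver */
        }
        return -1;
}

int main(void)
{
        if (!request_index_mutex(1, 4))
                puts("acquired");
        if (!request_index_mutex(0, 4))
                puts("released");
        return 0;
}

The soft-reset path brackets its CP_VMID_RESET write with this acquire/release so the CPU and the CP firmware cannot race on that register.
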
index 00b21ece081f96b4470d4f4ba2163148c71354ea..131cddbdda0dc11716205307e51d72aa72b271bf 100644 (file)
@@ -3828,8 +3828,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
        /* the caller should make sure initialize value of
         * err_data->ue_count and err_data->ce_count
         */
-       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
-       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
 }
 
 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
index 53a2ba5fcf4ba3c2865b0abcf6a7592f24e4ea10..22175da0e16afefef1fd4df7a0afbfdf3c63f688 100644 (file)
@@ -102,7 +102,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
                WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                        min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
-               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+               if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+                                      AMD_APU_IS_RENOIR |
+                                      AMD_APU_IS_GREEN_SARDINE))
                       /*
                        * Raven2 has a HW issue that it is unable to use the
                        * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
index 55423ff1bb4926d78003ad33dc4b3ceec0591948..49aecdcee006959491e4dba90058faf35e205fdb 100644 (file)
@@ -139,7 +139,9 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
                        WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                                min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
-                       if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+                       if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+                                              AMD_APU_IS_RENOIR |
+                                              AMD_APU_IS_GREEN_SARDINE))
                               /*
                                * Raven2 has a HW issue that it is unable to use the
                                * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
@@ -454,10 +456,12 @@ static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
                WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);
 
                /* Setup L2 cache */
-               tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
-               tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
-               WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
-               WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+               if (!amdgpu_sriov_vf(adev)) {
+                       tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
+                       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+                       WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
+                       WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+               }
        }
 }
 
index a5a05c16c10d7be2ea1b86fbdcf76699551a8fd8..6c51856088546faed3c2e3d9376f8c23d54ba554 100644 (file)
@@ -1041,6 +1041,10 @@ static int gmc_v10_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 
+       if (adev->gmc.ecc_irq.funcs &&
+               amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+               amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
        return 0;
 }
 
index 23d7b548d13f446766c0adc6f051f9b492111efb..c9c653cfc765b8b88e5ab1f77cefcbbce38ff79c 100644 (file)
@@ -941,6 +941,11 @@ static int gmc_v11_0_hw_fini(void *handle)
        }
 
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+
+       if (adev->gmc.ecc_irq.funcs &&
+               amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+               amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
        gmc_v11_0_gart_disable(adev);
 
        return 0;
index 473a774294cee76356717f7eb5e3ddaabbf76c11..f9039d64ff2d72804556daa16b8ed9632b08b307 100644 (file)
@@ -2380,6 +2380,10 @@ static int gmc_v9_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 
+       if (adev->gmc.ecc_irq.funcs &&
+               amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+               amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
        return 0;
 }
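
The three gmc hw_fini hunks above (v10, v11, v9) add the missing teardown counterpart for the ECC interrupt, using the same guard under which the reference is taken at init time: the IRQ source exists (funcs non-NULL) and UMC RAS is supported. A toy illustration of why the put side must mirror the get side's condition (types and helpers are local to this sketch):

#include <stdbool.h>
#include <stdio.h>

/* Toy irq source: a count of enabled users, as amdgpu_irq_get/put keep. */
struct irq_src { bool installed; int enabled; };

static void irq_get(struct irq_src *s) { s->enabled++; }

static int irq_put(struct irq_src *s)
{
        if (s->enabled == 0)
                return -1;      /* underflow: put without a matching get */
        s->enabled--;
        return 0;
}

int main(void)
{
        struct irq_src ecc = { .installed = false, .enabled = 0 };
        bool ras_supported = false;

        /* init path: only take the reference when the IRQ exists and RAS is on */
        if (ecc.installed && ras_supported)
                irq_get(&ecc);

        /* fini path must use the same condition, or it underflows */
        if (ecc.installed && ras_supported)
                printf("put: %d\n", irq_put(&ecc));
        else
                printf("skipped put (no matching get)\n");
        return 0;
}
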
 
index 843219a91736093b4b4a6242a75961816d7ec3c4..e3ddd22aa1728eaf89b4ba38657b1816371c5288 100644 (file)
@@ -96,7 +96,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
-       if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+       if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+                              AMD_APU_IS_RENOIR |
+                              AMD_APU_IS_GREEN_SARDINE))
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
index 9b0146732e13ced30b38336fc76e0d46922ff77e..fb53aacdcba20f01019a20d63c7bb07d60e1e8d1 100644 (file)
@@ -652,8 +652,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
                                        AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
                                        &ue_count);
 
-       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
-       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
 }
 
 static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
index 6d24c84924cb5dd646ddaa69bb91a3193493b5f4..19986ff6a48d7e773dcc892b9dccd585fe69c306 100644 (file)
@@ -401,8 +401,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 
                        if (err_data.ce_count)
                                dev_info(adev->dev, "%ld correctable hardware "
-                                               "errors detected in %s block, "
-                                               "no user action is needed.\n",
+                                               "errors detected in %s block\n",
                                                obj->err_data.ce_count,
                                                get_ras_block_str(adev->nbio.ras_if));
 
index 25a3da83e0fb97e5949221d17e3fcd63062dd29c..e90f33780803458c32843f2599c07e4f598ca659 100644 (file)
@@ -597,8 +597,7 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 
                        if (err_data.ce_count)
                                dev_info(adev->dev, "%ld correctable hardware "
-                                               "errors detected in %s block, "
-                                               "no user action is needed.\n",
+                                               "errors detected in %s block\n",
                                                obj->err_data.ce_count,
                                                get_ras_block_str(adev->nbio.ras_if));
 
index 0f24af6f28102bc490d6bc2ecdc890294e5f1905..2d688dca26bedba5018bd41c76fb09a65a38cd66 100644 (file)
@@ -2156,7 +2156,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
                                        AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
                                        &ue_count);
 
-       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
 }
 
 static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
index e9c2ff74f0bc1d6f530a5433b2383072289b0940..7458a218e89db1dc98211c83c864e29cd81ab7af 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "umc/umc_12_0_0_offset.h"
 #include "umc/umc_12_0_0_sh_mask.h"
+#include "mp/mp_13_0_6_sh_mask.h"
 
 const uint32_t
        umc_v12_0_channel_idx_tbl[]
@@ -88,16 +89,26 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
                umc_v12_0_reset_error_count_per_channel, NULL);
 }
 
-bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
 {
+       if (amdgpu_ras_is_poison_mode_supported(adev) &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+               return true;
+
        return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
                (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
                REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
                REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
 }
 
-bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
 {
+       if (amdgpu_ras_is_poison_mode_supported(adev) &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+               return false;
+
        return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
                (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
                (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
@@ -105,7 +116,7 @@ bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
                /* Identify data parity error in replay mode */
                ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
                REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
-               !(umc_v12_0_is_uncorrectable_error(mc_umc_status)))));
+               !(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
 }
 
 static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
@@ -124,7 +135,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
        mc_umc_status =
                RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
 
-       if (umc_v12_0_is_correctable_error(mc_umc_status))
+       if (umc_v12_0_is_correctable_error(adev, mc_umc_status))
                *error_count += 1;
 }
 
@@ -142,7 +153,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev
        mc_umc_status =
                RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
 
-       if (umc_v12_0_is_uncorrectable_error(mc_umc_status))
+       if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))
                *error_count += 1;
 }
 
@@ -166,8 +177,8 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
        umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count);
        umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count);
 
-       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
-       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
 
        return 0;
 }
@@ -360,6 +371,59 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
        return 0;
 }
 
+static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+                                       void *ras_error_status)
+{
+       amdgpu_mca_smu_log_ras_error(adev,
+               AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status);
+       amdgpu_mca_smu_log_ras_error(adev,
+               AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status);
+}
+
+static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+                                       void *ras_error_status)
+{
+       struct ras_err_node *err_node;
+       uint64_t mc_umc_status;
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       for_each_ras_error(err_node, err_data) {
+               mc_umc_status = err_node->err_info.err_addr.err_status;
+               if (!mc_umc_status)
+                       continue;
+
+               if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
+                       uint64_t mca_addr, err_addr, mca_ipid;
+                       uint32_t InstanceIdLo;
+                       struct amdgpu_smuio_mcm_config_info *mcm_info;
+
+                       mcm_info = &err_node->err_info.mcm_info;
+                       mca_addr = err_node->err_info.err_addr.err_addr;
+                       mca_ipid = err_node->err_info.err_addr.err_ipid;
+
+                       err_addr =  REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+                       InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+
+                       dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
+                               mca_ipid,
+                               mcm_info->die_id,
+                               MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+                               MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+                               err_addr);
+
+                       umc_v12_0_convert_error_address(adev,
+                               err_data, err_addr,
+                               MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+                               MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+                               mcm_info->die_id);
+
+                       /* Clear umc error address content */
+                       memset(&err_node->err_info.err_addr,
+                               0, sizeof(err_node->err_info.err_addr));
+               }
+       }
+}
+
 static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
 {
        amdgpu_umc_loop_channels(adev,
@@ -386,4 +450,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
        },
        .err_cnt_init = umc_v12_0_err_cnt_init,
        .query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
+       .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
+       .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
 };
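
The is_uncorrectable/is_correctable helpers above now take the device so they can consult the poison mode: with poison supported, a valid Deferred error is classed as uncorrectable and symmetrically excluded from the correctable count. A minimal sketch of that three-way decision on a fake status word (the bit positions are invented for the sketch; the real driver reads MCA_UMC_UMC0_MCUMC_STATUST0 fields):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented bit layout, for illustration only. */
#define ST_VAL      (1ull << 0)
#define ST_DEFERRED (1ull << 1)
#define ST_PCC      (1ull << 2)
#define ST_UC       (1ull << 3)
#define ST_CECC     (1ull << 4)

static bool is_uncorrectable(bool poison_mode, uint64_t st)
{
        /* valid Deferred error + poison mode: count it as a UE */
        if (poison_mode && (st & ST_VAL) && (st & ST_DEFERRED))
                return true;
        return (st & ST_VAL) && (st & (ST_PCC | ST_UC));
}

static bool is_correctable(bool poison_mode, uint64_t st)
{
        /* ...and keep the same error out of the CE count */
        if (poison_mode && (st & ST_VAL) && (st & ST_DEFERRED))
                return false;
        return (st & ST_VAL) && (st & ST_CECC);
}

int main(void)
{
        uint64_t st = ST_VAL | ST_DEFERRED;

        printf("poison on : UE=%d CE=%d\n",
               is_uncorrectable(true, st), is_correctable(true, st));
        printf("poison off: UE=%d CE=%d\n",
               is_uncorrectable(false, st), is_correctable(false, st));
        return 0;
}
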
index b34b1e358f8b823f439cfa437b870726aba6984b..e8de3a92251a2c0070345ce89c317032a386d45b 100644 (file)
                (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
        } while (0)
 
-bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status);
-bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status);
+#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \
+                       (((_ipid_lo) >> 12) & 0xF))
+#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
+bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
 
 extern const uint32_t
        umc_v12_0_channel_idx_tbl[]
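
The MCA_IPID_LO_2_UMC_CH()/MCA_IPID_LO_2_UMC_INST() macros added above unpack the UMC channel and instance from the low 32 bits of the MCA IPID: the instance is bits [23:21], and the channel is bit 20 (worth four channels) plus the nibble at bits [15:12]. A quick standalone check of that bit arithmetic (macros copied from the header):

#include <stdint.h>
#include <stdio.h>

#define MCA_IPID_LO_2_UMC_CH(lo)  (((((lo) >> 20) & 0x1) * 4) + \
                                   (((lo) >> 12) & 0xF))
#define MCA_IPID_LO_2_UMC_INST(lo) (((lo) >> 21) & 0x7)

int main(void)
{
        /* inst = 5, channel-high bit = 1, channel nibble = 3  =>  ch = 4 + 3 */
        uint32_t lo = (5u << 21) | (1u << 20) | (3u << 12);

        printf("inst=%u ch=%u\n",
               (unsigned)MCA_IPID_LO_2_UMC_INST(lo),
               (unsigned)MCA_IPID_LO_2_UMC_CH(lo));
        return 0;
}
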
index 530549314ce46c541a192305d1a7e1db17f11ebf..a3ee3c4c650febb4ca6fa61c9b7b5b51f16ce60c 100644 (file)
@@ -64,7 +64,7 @@ static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
        uint64_t reg_value;
 
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
-               dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+               dev_info(adev->dev, "Deferred error\n");
 
        if (mc_umc_status)
                dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
index 62b205dac63a05aa6e6b52b6407bf7f576cf4ff0..6604a3f99c5ecfd016e04230cde43d02e439c7d9 100644 (file)
@@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
        pdd->gpuvm_limit =
                pdd->dev->kfd->shared_resources.gpuvm_size - 1;
 
-       /* dGPUs: the reserved space for kernel
-        * before SVM
-        */
-       pdd->qpd.cwsr_base = SVM_CWSR_BASE;
-       pdd->qpd.ib_base = SVM_IB_BASE;
-
        pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
        pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
 }
@@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
        pdd->lds_base = MAKE_LDS_APP_BASE_V9();
        pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
 
-       pdd->gpuvm_base = PAGE_SIZE;
+       /* Raven needs SVM to support graphic handle, etc. Leave the small
+        * reserved space before SVM on Raven as well, even though we don't
+        * have to.
+        * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
+        * are used in Thunk to reserve SVM.
+        */
+       pdd->gpuvm_base = SVM_USER_BASE;
        pdd->gpuvm_limit =
                pdd->dev->kfd->shared_resources.gpuvm_size - 1;
 
        pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
        pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
-
-       /*
-        * Place TBA/TMA on opposite side of VM hole to prevent
-        * stray faults from triggering SVM on these pages.
-        */
-       pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
 }
 
 int kfd_init_apertures(struct kfd_process *process)
@@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
                                        return -EINVAL;
                                }
                        }
+
+                       /* dGPUs: the reserved space for kernel
+                        * before SVM
+                        */
+                       pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+                       pdd->qpd.ib_base = SVM_IB_BASE;
                }
 
                dev_dbg(kfd_device, "node id %u\n", id);
index b854cbf06dcee9144b77d7eff71ea1a9843b148b..f856901055d34e605cd4ec51fbdfc3be18e2abeb 100644 (file)
@@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr)
        put_page(page);
 }
 
-static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
-{
-       unsigned long cpages = 0;
-       unsigned long i;
-
-       for (i = 0; i < migrate->npages; i++) {
-               if (migrate->src[i] & MIGRATE_PFN_VALID &&
-                   migrate->src[i] & MIGRATE_PFN_MIGRATE)
-                       cpages++;
-       }
-       return cpages;
-}
-
 static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
 {
        unsigned long upages = 0;
@@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
        struct dma_fence *mfence = NULL;
        struct migrate_vma migrate = { 0 };
        unsigned long cpages = 0;
+       unsigned long mpages = 0;
        dma_addr_t *scratch;
        void *buf;
        int r = -ENOMEM;
@@ -450,12 +438,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
        r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
        migrate_vma_pages(&migrate);
 
-       pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
-               svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
-
        svm_migrate_copy_done(adev, mfence);
        migrate_vma_finalize(&migrate);
 
+       mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
+       pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+                mpages, cpages, migrate.npages);
+
        kfd_smi_event_migration_end(node, p->lead_thread->pid,
                                    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
                                    0, node->id, trigger);
@@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
 out_free:
        kvfree(buf);
 out:
-       if (!r && cpages) {
+       if (!r && mpages) {
                pdd = svm_range_get_pdd_by_node(prange, node);
                if (pdd)
-                       WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
+                       WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
 
-               return cpages;
+               return mpages;
        }
        return r;
 }
@@ -498,7 +487,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
        struct vm_area_struct *vma;
        uint64_t ttm_res_offset;
        struct kfd_node *node;
-       unsigned long cpages = 0;
+       unsigned long mpages = 0;
        long r = 0;
 
        if (start_mgr < prange->start || last_mgr > prange->last) {
@@ -540,15 +529,15 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
                        pr_debug("failed %ld to migrate\n", r);
                        break;
                } else {
-                       cpages += r;
+                       mpages += r;
                }
                ttm_res_offset += next - addr;
                addr = next;
        }
 
-       if (cpages) {
+       if (mpages) {
                prange->actual_loc = best_loc;
-               prange->vram_pages = prange->vram_pages + cpages;
+               prange->vram_pages += mpages;
        } else if (!prange->actual_loc) {
                /* if no page migrated and all pages from prange are at
                 * sys ram drop svm_bo got from svm_range_vram_node_new
@@ -1037,7 +1026,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
        } else {
                res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
                if (IS_ERR(res))
-                       return -ENOMEM;
+                       return PTR_ERR(res);
                pgmap->range.start = res->start;
                pgmap->range.end = res->end;
                pgmap->type = MEMORY_DEVICE_PRIVATE;
@@ -1053,10 +1042,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
        r = devm_memremap_pages(adev->dev, pgmap);
        if (IS_ERR(r)) {
                pr_err("failed to register HMM device memory\n");
-               /* Disable SVM support capability */
-               pgmap->type = 0;
                if (pgmap->type == MEMORY_DEVICE_PRIVATE)
                        devm_release_mem_region(adev->dev, res->start, resource_size(res));
+               /* Disable SVM support capability */
+               pgmap->type = 0;
                return PTR_ERR(r);
        }
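
The kfd_migrate.c hunks above drop the dedicated svm_migrate_successful_pages() scan and instead derive mpages = cpages - upages, where upages counts entries that were collected (MIGRATE_PFN_VALID) but never actually migrated (MIGRATE_PFN_MIGRATE clear); mpages, not cpages, is then propagated into vram_pages and the return value. A small userspace sketch of that accounting (flag bits are defined locally for the sketch):

#include <stdio.h>

/* Local stand-ins for MIGRATE_PFN_VALID / MIGRATE_PFN_MIGRATE. */
#define PFN_VALID   (1ul << 0)
#define PFN_MIGRATE (1ul << 1)

static unsigned long unsuccessful_pages(const unsigned long *src,
                                        unsigned long npages)
{
        unsigned long upages = 0, i;

        for (i = 0; i < npages; i++)
                if ((src[i] & PFN_VALID) && !(src[i] & PFN_MIGRATE))
                        upages++;
        return upages;
}

int main(void)
{
        unsigned long src[] = {
                PFN_VALID | PFN_MIGRATE,        /* migrated */
                PFN_VALID,                      /* collected, not migrated */
                PFN_VALID | PFN_MIGRATE,        /* migrated */
        };
        unsigned long cpages = 3;       /* pages collected for migration */
        unsigned long mpages = cpages - unsuccessful_pages(src, 3);

        printf("mpages=%lu of cpages=%lu\n", mpages, cpages);
        return 0;
}
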
 
index 45366b4ca97694cb87c87cad40dc54c8d4895e94..17fbedbf3651388edfcd0109a22d0fe9dfcd331f 100644 (file)
@@ -917,7 +917,7 @@ struct kfd_process {
         * fence will be triggered during eviction and new one will be created
         * during restore
         */
-       struct dma_fence *ef;
+       struct dma_fence __rcu *ef;
 
        /* Work items for evicting and restoring BOs */
        struct delayed_work eviction_work;
@@ -970,7 +970,7 @@ struct kfd_process {
        struct work_struct debug_event_workarea;
 
        /* Tracks debug per-vmid request for debug flags */
-       bool dbg_flags;
+       u32 dbg_flags;
 
        atomic_t poison;
        /* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
index 71df51fcc1b0d80f42899a0e15ae454b3f03f2bc..717a60d7a4ea953b8dfc369b09d855ad74b49659 100644 (file)
@@ -1110,6 +1110,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 {
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);
+       struct dma_fence *ef;
 
        kfd_process_dequeue_from_all_devices(p);
        pqm_uninit(&p->pqm);
@@ -1118,7 +1119,9 @@ static void kfd_process_wq_release(struct work_struct *work)
         * destroyed. This allows any BOs to be freed without
         * triggering pointless evictions or waiting for fences.
         */
-       dma_fence_signal(p->ef);
+       synchronize_rcu();
+       ef = rcu_access_pointer(p->ef);
+       dma_fence_signal(ef);
 
        kfd_process_remove_sysfs(p);
 
@@ -1127,7 +1130,7 @@ static void kfd_process_wq_release(struct work_struct *work)
        svm_range_list_fini(p);
 
        kfd_process_destroy_pdds(p);
-       dma_fence_put(p->ef);
+       dma_fence_put(ef);
 
        kfd_event_free_process(p);
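
The teardown hunk above turns p->ef into an __rcu pointer: the release worker waits for readers with synchronize_rcu(), loads the fence pointer once, signals that snapshot, and puts the very same snapshot after the PDDs are destroyed, so nothing can swap the pointer between the signal and the final reference drop. A loose userspace analog of the "load once, use the same snapshot for both signal and put" discipline (C11 atomics standing in for RCU, a toy struct standing in for dma_fence):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fence { int signaled; int refcount; };

static _Atomic(struct fence *) process_ef;

static void fence_signal(struct fence *f) { if (f) f->signaled = 1; }

static void fence_put(struct fence *f)
{
        if (f && --f->refcount == 0)
                free(f);
}

int main(void)
{
        struct fence *f = calloc(1, sizeof(*f));

        f->refcount = 1;
        atomic_store(&process_ef, f);

        /* Read the pointer once; signal and put the same snapshot. */
        struct fence *ef = atomic_load(&process_ef);

        fence_signal(ef);
        /* ... teardown work that must observe the signaled fence ... */
        fence_put(ef);

        printf("done\n");
        return 0;
}
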
 
index 70ca761bd60be2969cfba43c02019bcb20fd6b04..c50a0dc9c9c072f5692d003bce90aaaf13615c5d 100644 (file)
@@ -158,13 +158,12 @@ svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                      unsigned long offset, unsigned long npages,
-                     unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages)
+                     unsigned long *hmm_pfns, uint32_t gpuidx)
 {
        enum dma_data_direction dir = DMA_BIDIRECTIONAL;
        dma_addr_t *addr = prange->dma_addr[gpuidx];
        struct device *dev = adev->dev;
        struct page *page;
-       uint64_t vram_pages_dev;
        int i, r;
 
        if (!addr) {
@@ -174,7 +173,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                prange->dma_addr[gpuidx] = addr;
        }
 
-       vram_pages_dev = 0;
        addr += offset;
        for (i = 0; i < npages; i++) {
                if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
@@ -184,7 +182,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                if (is_zone_device_page(page)) {
                        struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
 
-                       vram_pages_dev++;
                        addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
                                   bo_adev->vm_manager.vram_base_offset -
                                   bo_adev->kfd.pgmap.range.start;
@@ -201,14 +198,14 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
                                     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
        }
-       *vram_pages = vram_pages_dev;
+
        return 0;
 }
 
 static int
 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
                  unsigned long offset, unsigned long npages,
-                 unsigned long *hmm_pfns, uint64_t *vram_pages)
+                 unsigned long *hmm_pfns)
 {
        struct kfd_process *p;
        uint32_t gpuidx;
@@ -227,7 +224,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
                }
 
                r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
-                                         hmm_pfns, gpuidx, vram_pages);
+                                         hmm_pfns, gpuidx);
                if (r)
                        break;
        }
@@ -407,14 +404,9 @@ static void svm_range_bo_release(struct kref *kref)
                spin_lock(&svm_bo->list_lock);
        }
        spin_unlock(&svm_bo->list_lock);
-       if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
-               /* We're not in the eviction worker.
-                * Signal the fence and synchronize with any
-                * pending eviction work.
-                */
+       if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
+               /* We're not in the eviction worker. Signal the fence. */
                dma_fence_signal(&svm_bo->eviction_fence->base);
-               cancel_work_sync(&svm_bo->eviction_work);
-       }
        dma_fence_put(&svm_bo->eviction_fence->base);
        amdgpu_bo_unref(&svm_bo->bo);
        kfree(svm_bo);
@@ -885,14 +877,29 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
 
 static void *
 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
-                    uint64_t offset)
+                    uint64_t offset, uint64_t *vram_pages)
 {
+       unsigned char *src = (unsigned char *)psrc + offset;
        unsigned char *dst;
+       uint64_t i;
 
        dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
        if (!dst)
                return NULL;
-       memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
+
+       if (!vram_pages) {
+               memcpy(dst, src, num_elements * size);
+               return (void *)dst;
+       }
+
+       *vram_pages = 0;
+       for (i = 0; i < num_elements; i++) {
+               dma_addr_t *temp;
+               temp = (dma_addr_t *)dst + i;
+               *temp = *((dma_addr_t *)src + i);
+               if (*temp & SVM_RANGE_VRAM_DOMAIN)
+                       (*vram_pages)++;
+       }
 
        return (void *)dst;
 }
@@ -906,7 +913,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
                if (!src->dma_addr[i])
                        continue;
                dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
-                                       sizeof(*src->dma_addr[i]), src->npages, 0);
+                                       sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
                if (!dst->dma_addr[i])
                        return -ENOMEM;
        }
@@ -917,7 +924,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
 static int
 svm_range_split_array(void *ppnew, void *ppold, size_t size,
                      uint64_t old_start, uint64_t old_n,
-                     uint64_t new_start, uint64_t new_n)
+                     uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
 {
        unsigned char *new, *old, *pold;
        uint64_t d;
@@ -929,11 +936,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
                return 0;
 
        d = (new_start - old_start) * size;
-       new = svm_range_copy_array(pold, size, new_n, d);
+       /* get dma addr array for new range and calculate its vram page number */
+       new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
        if (!new)
                return -ENOMEM;
        d = (new_start == old_start) ? new_n * size : 0;
-       old = svm_range_copy_array(pold, size, old_n, d);
+       old = svm_range_copy_array(pold, size, old_n, d, NULL);
        if (!old) {
                kvfree(new);
                return -ENOMEM;
@@ -955,10 +963,13 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old,
        for (i = 0; i < MAX_GPU_INSTANCE; i++) {
                r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
                                          sizeof(*old->dma_addr[i]), old->start,
-                                         npages, new->start, new->npages);
+                                         npages, new->start, new->npages,
+                                         old->actual_loc ? &new->vram_pages : NULL);
                if (r)
                        return r;
        }
+       if (old->actual_loc)
+               old->vram_pages -= new->vram_pages;
 
        return 0;
 }
@@ -982,11 +993,6 @@ svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
        new->svm_bo = svm_range_bo_ref(old->svm_bo);
        new->ttm_res = old->ttm_res;
 
-       /* set new's vram_pages as old range's now, the acurate vram_pages
-        * will be updated during mapping
-        */
-       new->vram_pages = min(old->vram_pages, new->npages);
-
        spin_lock(&new->svm_bo->list_lock);
        list_add(&new->svm_bo_list, &new->svm_bo->range_list);
        spin_unlock(&new->svm_bo->list_lock);
@@ -1109,7 +1115,7 @@ static int
 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
                     struct list_head *insert_list, struct list_head *remap_list)
 {
-       struct svm_range *tail;
+       struct svm_range *tail = NULL;
        int r = svm_range_split(prange, prange->start, new_last, &tail);
 
        if (!r) {
@@ -1124,7 +1130,7 @@ static int
 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
                     struct list_head *insert_list, struct list_head *remap_list)
 {
-       struct svm_range *head;
+       struct svm_range *head = NULL;
        int r = svm_range_split(prange, new_start, prange->last, &head);
 
        if (!r) {
@@ -1573,7 +1579,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
        struct svm_validate_context *ctx;
        unsigned long start, end, addr;
        struct kfd_process *p;
-       uint64_t vram_pages;
        void *owner;
        int32_t idx;
        int r = 0;
@@ -1648,15 +1653,13 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                }
        }
 
-       vram_pages = 0;
-       start = prange->start << PAGE_SHIFT;
-       end = (prange->last + 1) << PAGE_SHIFT;
+       start = map_start << PAGE_SHIFT;
+       end = (map_last + 1) << PAGE_SHIFT;
        for (addr = start; !r && addr < end; ) {
                struct hmm_range *hmm_range;
                unsigned long map_start_vma;
                unsigned long map_last_vma;
                struct vm_area_struct *vma;
-               uint64_t vram_pages_vma;
                unsigned long next = 0;
                unsigned long offset;
                unsigned long npages;
@@ -1683,13 +1686,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                }
 
                if (!r) {
-                       offset = (addr - start) >> PAGE_SHIFT;
+                       offset = (addr >> PAGE_SHIFT) - prange->start;
                        r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-                                             hmm_range->hmm_pfns, &vram_pages_vma);
+                                             hmm_range->hmm_pfns);
                        if (r)
                                pr_debug("failed %d to dma map range\n", r);
-                       else
-                               vram_pages += vram_pages_vma;
                }
 
                svm_range_lock(prange);
@@ -1722,19 +1723,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                addr = next;
        }
 
-       if (addr == end) {
-               prange->vram_pages = vram_pages;
-
-               /* if prange does not include any vram page and it
-                * has not released svm_bo drop its svm_bo reference
-                * and set its actaul_loc to sys ram
-                */
-               if (!vram_pages && prange->ttm_res) {
-                       prange->actual_loc = 0;
-                       svm_range_vram_node_free(prange);
-               }
-       }
-
        svm_range_unreserve_bos(ctx);
        if (!r)
                prange->validate_timestamp = ktime_get_boottime();
@@ -2352,8 +2340,10 @@ retry:
                mutex_unlock(&svms->lock);
                mmap_write_unlock(mm);
 
-               /* Pairs with mmget in svm_range_add_list_work */
-               mmput(mm);
+               /* Pairs with mmget in svm_range_add_list_work. If dropping the
+                * last mm refcount, schedule release work to avoid circular locking
+                */
+               mmput_async(mm);
 
                spin_lock(&svms->deferred_list_lock);
        }
@@ -2664,6 +2654,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 {
        struct vm_area_struct *vma;
        struct interval_tree_node *node;
+       struct rb_node *rb_node;
        unsigned long start_limit, end_limit;
 
        vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
@@ -2683,16 +2674,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
        if (node) {
                end_limit = min(end_limit, node->start);
                /* Last range that ends before the fault address */
-               node = container_of(rb_prev(&node->rb),
-                                   struct interval_tree_node, rb);
+               rb_node = rb_prev(&node->rb);
        } else {
                /* Last range must end before addr because
                 * there was no range after addr
                 */
-               node = container_of(rb_last(&p->svms.objects.rb_root),
-                                   struct interval_tree_node, rb);
+               rb_node = rb_last(&p->svms.objects.rb_root);
        }
-       if (node) {
+       if (rb_node) {
+               node = container_of(rb_node, struct interval_tree_node, rb);
                if (node->last >= addr) {
                        WARN(1, "Overlap with prev node and page fault addr\n");
                        return -EFAULT;
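
The boundary-lookup fix above addresses a classic container_of() pitfall: rb_prev() and rb_last() can return NULL, but container_of(NULL, ...) subtracts the member offset and yields a small non-NULL garbage pointer, so the old `if (node)` check always passed. Keeping the raw struct rb_node * and testing it before converting closes the hole. A compact demonstration (kernel-style container_of reimplemented locally):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rb_node { int dummy; };
struct interval_tree_node {
        long start, last;
        struct rb_node rb;
};

int main(void)
{
        struct rb_node *rb_node = NULL; /* e.g. rb_prev() at the first node */

        /* Buggy pattern: converts NULL first; result is non-NULL garbage. */
        struct interval_tree_node *bad =
                container_of(rb_node, struct interval_tree_node, rb);
        printf("container_of(NULL) = %p (non-NULL!)\n", (void *)bad);

        /* Fixed pattern: test the rb_node before converting. */
        if (rb_node) {
                struct interval_tree_node *node =
                        container_of(rb_node, struct interval_tree_node, rb);
                printf("prev range ends at %ld\n", node->last);
        } else {
                printf("no previous range\n");
        }
        return 0;
}
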
@@ -3439,13 +3429,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
 
 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
 {
-       if (!fence)
-               return -EINVAL;
-
-       if (dma_fence_is_signaled(&fence->base))
-               return 0;
-
-       if (fence->svm_bo) {
+       /* Dereferencing fence->svm_bo is safe here because the fence hasn't
+        * signaled yet and we're under the protection of the fence->lock.
+        * After the fence is signaled in svm_range_bo_release, we cannot get
+        * here any more.
+        *
+        * Reference is dropped in svm_range_evict_svm_bo_worker.
+        */
+       if (svm_bo_ref_unless_zero(fence->svm_bo)) {
                WRITE_ONCE(fence->svm_bo->evicting, 1);
                schedule_work(&fence->svm_bo->eviction_work);
        }
@@ -3460,8 +3451,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
        int r = 0;
 
        svm_bo = container_of(work, struct svm_range_bo, eviction_work);
-       if (!svm_bo_ref_unless_zero(svm_bo))
-               return; /* svm_bo was freed while eviction was pending */
 
        if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
                mm = svm_bo->eviction_fence->mm;
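
The eviction rework just above replaces the "signal the fence, then cancel_work_sync()" dance with a try-get: svm_range_schedule_evict_svm_bo() only schedules the worker if it can still take a reference via svm_bo_ref_unless_zero(), and the worker's own try-get is removed. A minimal sketch of the kref_get_unless_zero() idea with C11 atomics (names are local to the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int refcount; };

/* Take a reference only if the object is not already headed to zero. */
static bool ref_unless_zero(struct obj *o)
{
        int c = atomic_load(&o->refcount);

        while (c != 0) {
                /* on failure, c is reloaded with the current value; retry */
                if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        struct obj live = { 1 }, dying = { 0 };

        printf("live:  %s\n", ref_unless_zero(&live) ? "got ref" : "skipped");
        printf("dying: %s\n", ref_unless_zero(&dying) ? "got ref" : "skipped");
        return 0;
}
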
index 057284bf50bbea43c819daa6a8d9f14b85ab7abe..e5f7c92eebcbbfa6a1fda115ca2b599cab48e4e8 100644 (file)
@@ -1342,10 +1342,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
                num_cpu++;
        }
 
+       if (list_empty(&kdev->io_link_props))
+               return -ENODATA;
+
        gpu_link = list_first_entry(&kdev->io_link_props,
-                                       struct kfd_iolink_properties, list);
-       if (!gpu_link)
-               return -ENOMEM;
+                                   struct kfd_iolink_properties, list);
 
        for (i = 0; i < num_cpu; i++) {
                /* CPU <--> GPU */
@@ -1423,15 +1424,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
                                peer->gpu->adev))
                return ret;
 
+       if (list_empty(&kdev->io_link_props))
+               return -ENODATA;
+
        iolink1 = list_first_entry(&kdev->io_link_props,
-                                                       struct kfd_iolink_properties, list);
-       if (!iolink1)
-               return -ENOMEM;
+                                  struct kfd_iolink_properties, list);
+
+       if (list_empty(&peer->io_link_props))
+               return -ENODATA;
 
        iolink2 = list_first_entry(&peer->io_link_props,
-                                                       struct kfd_iolink_properties, list);
-       if (!iolink2)
-               return -ENOMEM;
+                                  struct kfd_iolink_properties, list);
 
        props = kfd_alloc_struct(props);
        if (!props)
@@ -1449,17 +1452,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
                /* CPU->CPU  link*/
                cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
                if (cpu_dev) {
-                       list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
-                               if (iolink3->node_to == iolink2->node_to)
-                                       break;
-
-                       props->weight += iolink3->weight;
-                       props->min_latency += iolink3->min_latency;
-                       props->max_latency += iolink3->max_latency;
-                       props->min_bandwidth = min(props->min_bandwidth,
-                                                       iolink3->min_bandwidth);
-                       props->max_bandwidth = min(props->max_bandwidth,
-                                                       iolink3->max_bandwidth);
+                       list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
+                               if (iolink3->node_to != iolink2->node_to)
+                                       continue;
+
+                               props->weight += iolink3->weight;
+                               props->min_latency += iolink3->min_latency;
+                               props->max_latency += iolink3->max_latency;
+                               props->min_bandwidth = min(props->min_bandwidth,
+                                                          iolink3->min_bandwidth);
+                               props->max_bandwidth = min(props->max_bandwidth,
+                                                          iolink3->max_bandwidth);
+                               break;
+                       }
                } else {
                        WARN(1, "CPU node not found");
                }
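
The topology fixes above replace an ineffective NULL test: list_first_entry() never returns NULL; on an empty list it simply returns a bogus pointer computed from the list head itself, so the guard must be list_empty() beforehand (and -ENODATA describes that case better than -ENOMEM). A small userspace rendition of the pattern with a minimal list implementation:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
        container_of((head)->next, type, member)

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

struct iolink { int node_to; struct list_head list; };

int main(void)
{
        struct list_head io_link_props = LIST_HEAD_INIT(io_link_props);

        /* Correct guard: bail out before calling list_first_entry(). */
        if (list_empty(&io_link_props)) {
                printf("no io links: return -ENODATA\n");
                return 0;
        }

        struct iolink *gpu_link =
                list_first_entry(&io_link_props, struct iolink, list);
        printf("first link -> node %d\n", gpu_link->node_to);
        return 0;
}
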
index 54861136dafd911461f8de9f0e5cbc36282dfab2..d55eeb30ccb2cf8347607f276ed390fa932fb4e0 100644 (file)
@@ -37,6 +37,7 @@
 #include "dc/dc_dmub_srv.h"
 #include "dc/dc_edid_parser.h"
 #include "dc/dc_stat.h"
+#include "dc/dc_state.h"
 #include "amdgpu_dm_trace.h"
 #include "dpcd_defs.h"
 #include "link/protocols/link_dpcd.h"
@@ -66,7 +67,6 @@
 #include "amdgpu_dm_debugfs.h"
 #endif
 #include "amdgpu_dm_psr.h"
-#include "amdgpu_dm_replay.h"
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
@@ -1294,7 +1294,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        /* AGP aperture is disabled */
        if (agp_bot > agp_top) {
                logical_addr_low = adev->gmc.fb_start >> 18;
-               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+               if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+                                      AMD_APU_IS_RENOIR |
+                                      AMD_APU_IS_GREEN_SARDINE))
                        /*
                         * Raven2 has a HW issue that it is unable to use the vram which
                         * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -1306,7 +1308,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
                        logical_addr_high = adev->gmc.fb_end >> 18;
        } else {
                logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
-               if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+               if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+                                      AMD_APU_IS_RENOIR |
+                                      AMD_APU_IS_GREEN_SARDINE))
                        /*
                         * Raven2 has a HW issue that it is unable to use the vram which
                         * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -1711,6 +1715,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
        init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 
+       init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
        /* Enable DWB for tested platforms only */
        if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
                init_data.num_virtual_links = 1;
@@ -2607,12 +2613,10 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 
        memset(del_streams, 0, sizeof(del_streams));
 
-       context = dc_create_state(dc);
+       context = dc_state_create_current_copy(dc);
        if (context == NULL)
                goto context_alloc_fail;
 
-       dc_resource_state_copy_construct_current(dc, context);
-
        /* First remove from context all streams */
        for (i = 0; i < context->stream_count; i++) {
                struct dc_stream_state *stream = context->streams[i];
@@ -2622,12 +2626,12 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 
        /* Remove all planes for removed streams and then remove the streams */
        for (i = 0; i < del_streams_count; i++) {
-               if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+               if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
                        res = DC_FAIL_DETACH_SURFACES;
                        goto fail;
                }
 
-               res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+               res = dc_state_remove_stream(dc, context, del_streams[i]);
                if (res != DC_OK)
                        goto fail;
        }
@@ -2635,7 +2639,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
        res = dc_commit_streams(dc, context->streams, context->stream_count);
 
 fail:
-       dc_release_state(context);
+       dc_state_release(context);
 
 context_alloc_fail:
        return res;
@@ -2662,7 +2666,7 @@ static int dm_suspend(void *handle)
 
                dc_allow_idle_optimizations(adev->dm.dc, false);
 
-               dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+               dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
 
                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
 
@@ -2856,7 +2860,7 @@ static int dm_resume(void *handle)
        bool need_hotplug = false;
 
        if (dm->dc->caps.ips_support) {
-               dc_dmub_srv_exit_low_power_state(dm->dc);
+               dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
        }
 
        if (amdgpu_in_reset(adev)) {
@@ -2909,7 +2913,7 @@ static int dm_resume(void *handle)
 
                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
 
-               dc_release_state(dm->cached_dc_state);
+               dc_state_release(dm->cached_dc_state);
                dm->cached_dc_state = NULL;
 
                amdgpu_dm_irq_resume_late(adev);
@@ -2919,10 +2923,9 @@ static int dm_resume(void *handle)
                return 0;
        }
        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
-       dc_release_state(dm_state->context);
-       dm_state->context = dc_create_state(dm->dc);
+       dc_state_release(dm_state->context);
+       dm_state->context = dc_state_create(dm->dc);
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
-       dc_resource_state_construct(dm->dc, dm_state->context);
 
        /* Before powering on DC we need to re-initialize DMUB. */
        dm_dmub_hw_resume(adev);
@@ -3998,7 +4001,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
        old_state = to_dm_atomic_state(obj->state);
 
        if (old_state && old_state->context)
-               new_state->context = dc_copy_state(old_state->context);
+               new_state->context = dc_state_create_copy(old_state->context);
 
        if (!new_state->context) {
                kfree(new_state);
@@ -4014,7 +4017,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj,
        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 
        if (dm_state && dm_state->context)
-               dc_release_state(dm_state->context);
+               dc_state_release(dm_state->context);
 
        kfree(dm_state);
 }
@@ -4050,14 +4053,12 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
        if (!state)
                return -ENOMEM;
 
-       state->context = dc_create_state(adev->dm.dc);
+       state->context = dc_state_create_current_copy(adev->dm.dc);
        if (!state->context) {
                kfree(state);
                return -ENOMEM;
        }
 
-       dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
-
        drm_atomic_private_obj_init(adev_to_drm(adev),
                                    &adev->dm.atomic_obj,
                                    &state->base,
@@ -4065,7 +4066,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
        r = amdgpu_display_modeset_create_props(adev);
        if (r) {
-               dc_release_state(state->context);
+               dc_state_release(state->context);
                kfree(state);
                return r;
        }
@@ -4077,7 +4078,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
        r = amdgpu_dm_audio_init(adev);
        if (r) {
-               dc_release_state(state->context);
+               dc_state_release(state->context);
                kfree(state);
                return r;
        }
@@ -4391,7 +4392,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;
        bool psr_feature_enabled = false;
-       bool replay_feature_enabled = false;
        int max_overlay = dm->dc->caps.max_slave_planes;
 
        dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4503,20 +4503,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                }
        }
 
-       if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
-               switch (adev->ip_versions[DCE_HWIP][0]) {
-               case IP_VERSION(3, 1, 4):
-               case IP_VERSION(3, 1, 5):
-               case IP_VERSION(3, 1, 6):
-               case IP_VERSION(3, 2, 0):
-               case IP_VERSION(3, 2, 1):
-                       replay_feature_enabled = true;
-                       break;
-               default:
-                       replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
-                       break;
-               }
-       }
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
@@ -4585,12 +4571,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                                amdgpu_dm_update_connector_after_detect(aconnector);
                                setup_backlight_device(dm, aconnector);
 
-                               /*
-                                * Disable psr if replay can be enabled
-                                */
-                               if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
-                                       psr_feature_enabled = false;
-
                                if (psr_feature_enabled)
                                        amdgpu_dm_set_psr_caps(link);
 
@@ -6260,8 +6240,9 @@ create_stream_for_sink(struct drm_connector *connector,
 
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-
-       if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+       else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+                        stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+                        stream->signal == SIGNAL_TYPE_EDP) {
                //
                // Decide whether the stream supports VSC SDP colorimetry
                // before building the VSC info packet.
@@ -6277,8 +6258,9 @@ create_stream_for_sink(struct drm_connector *connector,
                if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
                        tf = TRANSFER_FUNC_GAMMA_22;
                mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-               aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 
+               if (stream->link->psr_settings.psr_feature_enabled)
+                       aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
        }
 finish:
        dc_sink_release(sink);
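
Note the control-flow change above: VSC SDP construction is now keyed off the signal type instead of the PSR/replay feature flags, so DP, DP-MST and eDP streams always get a chance at the colorimetry-capable VSC packet, while the PSR entry delay is armed only when PSR is actually enabled on the link. A hedged sketch of the new predicate (enum values as shown in the hunk):

	/* Sketch: VSC SDP applies to any DP-class signal; PSR bookkeeping
	 * stays conditional on the link's PSR settings. */
	static bool stream_wants_vsc_sdp(enum signal_type signal)
	{
		return signal == SIGNAL_TYPE_DISPLAY_PORT ||
		       signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
		       signal == SIGNAL_TYPE_EDP;
	}
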
@@ -6658,7 +6640,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
        if (!dc_plane_state)
                goto cleanup;
 
-       dc_state = dc_create_state(dc);
+       dc_state = dc_state_create(dc);
        if (!dc_state)
                goto cleanup;
 
@@ -6685,9 +6667,9 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
                dc_result = dc_validate_plane(dc, dc_plane_state);
 
        if (dc_result == DC_OK)
-               dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
+               dc_result = dc_state_add_stream(dc, dc_state, stream);
 
-       if (dc_result == DC_OK && !dc_add_plane_to_context(
+       if (dc_result == DC_OK && !dc_state_add_plane(
                                                dc,
                                                stream,
                                                dc_plane_state,
@@ -6699,7 +6681,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
 
 cleanup:
        if (dc_state)
-               dc_release_state(dc_state);
+               dc_state_release(dc_state);
 
        if (dc_plane_state)
                dc_plane_state_release(dc_plane_state);
@@ -7007,8 +6989,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
        if (IS_ERR(mst_state))
                return PTR_ERR(mst_state);
 
-       if (!mst_state->pbn_div.full)
-               mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+       mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
 
        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;
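
Dropping the !pbn_div.full guard means the PBN divider is recomputed on every atomic check rather than only when unset, so an MST link that retrains to a different rate no longer carries a stale divider. pbn_div is 20.12 fixed point; assuming drm_fixed.h's dfixed_const() (integer value shifted left by 12):

	/* Illustration: a divider of 2 from dm_mst_get_pbn_divider() is stored
	 * as 2 << 12 = 0x2000 in mst_state->pbn_div.full. */
	mst_state->pbn_div.full =
		dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
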
@@ -8858,7 +8839,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
                                        dc_stream_get_status(dm_new_crtc_state->stream);
 
                        if (!status)
-                               status = dc_stream_get_status_from_state(dc_state,
+                               status = dc_state_get_stream_status(dc_state,
                                                                         dm_new_crtc_state->stream);
                        if (!status)
                                drm_err(dev,
@@ -9001,7 +8982,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        if (new_con_state->crtc &&
                                new_con_state->crtc->state->active &&
                                drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
-                               dc_dmub_srv_exit_low_power_state(dm->dc);
+                               dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
                                break;
                        }
                }
@@ -9312,10 +9293,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                if (!new_con_state->writeback_job)
                        continue;
 
-               new_crtc_state = NULL;
+               new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 
-               if (acrtc)
-                       new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+               if (!new_crtc_state)
+                       continue;
 
                if (acrtc->wb_enabled)
                        continue;
@@ -9783,7 +9764,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                crtc->base.id);
 
                /* i.e. reset mode */
-               if (dc_remove_stream_from_ctx(
+               if (dc_state_remove_stream(
                                dm->dc,
                                dm_state->context,
                                dm_old_crtc_state->stream) != DC_OK) {
@@ -9826,7 +9807,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                        DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
                                         crtc->base.id);
 
-                       if (dc_add_stream_to_ctx(
+                       if (dc_state_add_stream(
                                        dm->dc,
                                        dm_state->context,
                                        dm_new_crtc_state->stream) != DC_OK) {
@@ -10148,7 +10129,7 @@ static int dm_update_plane_state(struct dc *dc,
                if (ret)
                        return ret;
 
-               if (!dc_remove_plane_from_context(
+               if (!dc_state_remove_plane(
                                dc,
                                dm_old_crtc_state->stream,
                                dm_old_plane_state->dc_state,
@@ -10226,7 +10207,7 @@ static int dm_update_plane_state(struct dc *dc,
                 * state. It'll be released when the atomic state is
                 * cleaned.
                 */
-               if (!dc_add_plane_to_context(
+               if (!dc_state_add_plane(
                                dc,
                                dm_new_crtc_state->stream,
                                dc_new_plane_state,
@@ -10910,7 +10891,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
        input->cea_total_length = total_length;
        memcpy(input->payload, data, length);
 
-       res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+       res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
        if (!res) {
                DRM_ERROR("EDID CEA parser failed\n");
                return false;
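
dm_execute_dmub_cmd() callers across this series are switched to dc_wake_and_execute_dmub_cmd(). Judging by the hunks here and in the clk_mgr files below, the wrapper brackets the command with a DMUB wake-up so it remains safe while idle power optimizations are active; its internals are not part of this diff. Before/after shape:

	/* Before: caller was responsible for DMUB being awake. */
	res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);

	/* After: wake DMUB, execute with the same wait semantics, re-allow idle. */
	res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
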
index 2d5af83d40b53bd128a85235a44126e8b3f95734..9c1871b866cc973091cd9a7bc5bc604201f6bd16 100644 (file)
@@ -747,7 +747,7 @@ enum amdgpu_transfer_function {
        AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF,
        AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF,
        AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF,
-        AMDGPU_TRANSFER_FUNCTION_COUNT
+       AMDGPU_TRANSFER_FUNCTION_COUNT
 };
 
 struct dm_plane_state {
@@ -844,7 +844,7 @@ struct dm_crtc_state {
 
        int abm_level;
 
-        /**
+       /**
         * @regamma_tf:
         *
         * Pre-defined transfer function for converting internal FB -> wire
index c6ed0d854b01d224c2256c65a145718f39abb521..c87b64e464ed5c8e13c6fb823b8bf9ca0bcfe0fc 100644 (file)
@@ -85,6 +85,18 @@ void amdgpu_dm_init_color_mod(void)
        setup_x_points_distribution();
 }
 
+static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x)
+{
+       struct fixed31_32 val;
+
+       /* DRM's S31.32 is sign-magnitude; convert negatives to two's complement. */
+       if (x & (1ULL << 63))
+               x = -(x & ~(1ULL << 63));
+
+       val.value = x;
+       return val;
+}
+
 #ifdef AMD_PRIVATE_COLOR
 /* Pre-defined Transfer Functions (TF)
  *
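
amdgpu_dm_fixpt_from_s3132() above converts DRM's sign-and-magnitude S31.32 CTM words into DC's two's-complement fixed31_32, replacing the dc_fixpt_from_s3132() calls in the hunks below. A worked example (illustrative value):

	/* -0.5 in DRM S31.32 sign-magnitude: sign bit 63 set, magnitude 0.5. */
	__u64 in = (1ULL << 63) | 0x80000000ULL;        /* 0x8000000080000000 */

	struct fixed31_32 out = amdgpu_dm_fixpt_from_s3132(in);
	/* Sign bit cleared, magnitude negated:
	 * out.value == 0xFFFFFFFF80000000 == -0.5 in two's-complement 31.32. */
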
@@ -430,7 +442,7 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
                }
 
                /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
-               matrix[i] = dc_fixpt_from_s3132(ctm->matrix[i - (i / 4)]);
+               matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]);
        }
 }
 
@@ -452,7 +464,7 @@ static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm,
         */
        for (i = 0; i < 12; i++) {
                /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
-               matrix[i] = dc_fixpt_from_s3132(ctm->matrix[i]);
+               matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]);
        }
 }
 
@@ -630,8 +642,7 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
 static enum dc_transfer_func_predefined
 amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
 {
-       switch (tf)
-       {
+       switch (tf) {
        default:
        case AMDGPU_TRANSFER_FUNCTION_DEFAULT:
        case AMDGPU_TRANSFER_FUNCTION_IDENTITY:
@@ -1137,7 +1148,7 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
        uint32_t shaper_size, lut3d_size, blend_size;
        int ret;
 
-       dc_plane_state->hdr_mult = dc_fixpt_from_s3132(dm_plane_state->hdr_mult);
+       dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult);
 
        shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size);
        shaper_size = shaper_lut != NULL ? shaper_size : 0;
@@ -1225,10 +1236,10 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
         * plane and CRTC degamma at the same time. Explicitly reject atomic
         * updates when userspace sets both plane and CRTC degamma properties.
         */
-       if (has_crtc_cm_degamma && ret != -EINVAL){
+       if (has_crtc_cm_degamma && ret != -EINVAL) {
                drm_dbg_kms(crtc->base.crtc->dev,
                            "doesn't support plane and CRTC degamma at the same time\n");
-                       return -EINVAL;
+               return -EINVAL;
        }
 
        /* If we are here, it means we don't have plane degamma settings, check
index 7545a184e43a66df185e9d7240c30c6189e5f2df..6e715ef3a5566edb1f65bab544b2017dc176b7a3 100644 (file)
@@ -29,7 +29,6 @@
 #include "dc.h"
 #include "amdgpu.h"
 #include "amdgpu_dm_psr.h"
-#include "amdgpu_dm_replay.h"
 #include "amdgpu_dm_crtc.h"
 #include "amdgpu_dm_plane.h"
 #include "amdgpu_dm_trace.h"
@@ -124,12 +123,7 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
         * fill_dc_dirty_rects().
         */
        if (vblank_work->stream && vblank_work->stream->link) {
-               /*
-                * Prioritize replay, instead of psr
-                */
-               if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
-                       amdgpu_dm_replay_enable(vblank_work->stream, false);
-               else if (vblank_work->enable) {
+               if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
                            vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
@@ -138,7 +132,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
                           !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
 #endif
-                          vblank_work->stream->link->panel_config.psr.disallow_replay &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
@@ -312,7 +305,7 @@ dm_crtc_additional_color_mgmt(struct drm_crtc *crtc)
 {
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
-       if(adev->dm.dc->caps.color.mpc.ogam_ram)
+       if (adev->dm.dc->caps.color.mpc.ogam_ram)
                drm_object_attach_property(&crtc->base,
                                           adev->mode_info.regamma_tf_property,
                                           AMDGPU_TRANSFER_FUNCTION_DEFAULT);
index 98b41ec7288e8979867680e624d4a961c8722b3f..68a846323912768edea6c6d330a3491c3aff1b27 100644 (file)
@@ -2976,7 +2976,6 @@ static int dmub_trace_mask_set(void *data, u64 val)
        struct amdgpu_device *adev = data;
        struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
        enum dmub_gpint_command cmd;
-       enum dmub_status status;
        u64 mask = 0xffff;
        u8 shift = 0;
        u32 res;
@@ -3003,13 +3002,7 @@ static int dmub_trace_mask_set(void *data, u64 val)
                        break;
                }
 
-               status = dmub_srv_send_gpint_command(srv, cmd, res, 30);
-
-               if (status == DMUB_STATUS_TIMEOUT)
-                       return -ETIMEDOUT;
-               else if (status == DMUB_STATUS_INVALID)
-                       return -EINVAL;
-               else if (status != DMUB_STATUS_OK)
+               if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT))
                        return -EIO;
 
                usleep_range(100, 1000);
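
Both debugfs hunks in this file replace the raw dmub_srv_send_gpint_command()/get_gpint_response() status juggling with dc_wake_and_execute_gpint(), which returns a plain bool and optionally the GPINT response word. Calling convention as used here; the per-status errno mapping collapses to -EIO:

	uint32_t response;

	/* Fire and wait, no reply needed: */
	if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL,
				       DM_DMUB_WAIT_TYPE_WAIT))
		return -EIO;

	/* Wait and read the response back: */
	if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response,
				       DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		return -EIO;
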
@@ -3026,7 +3019,6 @@ static int dmub_trace_mask_show(void *data, u64 *val)
        enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0;
        struct amdgpu_device *adev = data;
        struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
-       enum dmub_status status;
        u8 shift = 0;
        u64 raw = 0;
        u64 res = 0;
@@ -3036,23 +3028,12 @@ static int dmub_trace_mask_show(void *data, u64 *val)
                return -EINVAL;
 
        while (i < 4) {
-               status = dmub_srv_send_gpint_command(srv, cmd, 0, 30);
-
-               if (status == DMUB_STATUS_OK) {
-                       status = dmub_srv_get_gpint_response(srv, (u32 *) &raw);
-
-                       if (status == DMUB_STATUS_INVALID)
-                               return -EINVAL;
-                       else if (status != DMUB_STATUS_OK)
-                               return -EIO;
-               } else if (status == DMUB_STATUS_TIMEOUT) {
-                       return -ETIMEDOUT;
-               } else if (status == DMUB_STATUS_INVALID) {
-                       return -EINVAL;
-               } else {
+               uint32_t response;
+
+               if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
                        return -EIO;
-               }
 
+               raw = response;
                usleep_range(100, 1000);
 
                cmd++;
index eaf8d9f482446d5ea9728ec17657189e25917ae8..85b7f58a7f35a478f551ec097b1613b504ced535 100644 (file)
@@ -979,6 +979,11 @@ int dm_helper_dmub_aux_transfer_sync(
                struct aux_payload *payload,
                enum aux_return_code_type *operation_result)
 {
+       if (!link->hpd_status) {
+               *operation_result = AUX_RET_ERROR_HPD_DISCON;
+               return -1;
+       }
+
        return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
                        operation_result);
 }
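
The HPD check added above makes DMUB AUX transfers fail fast on a disconnected link instead of timing out in firmware. A hedged caller-side sketch of the contract (return -1 with *operation_result set):

	enum aux_return_code_type op_result;
	int ret = dm_helper_dmub_aux_transfer_sync(ctx, link, payload, &op_result);

	if (ret < 0 && op_result == AUX_RET_ERROR_HPD_DISCON) {
		/* Sink is gone -- skip retries rather than waiting on DMUB. */
		return ret;
	}
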
index 08ce3bb8f640d9dc24de199526383b18b1cb4469..1f08c6564c3bfea6dbc047b04eb1ee7603df0ead 100644 (file)
@@ -51,6 +51,9 @@ static bool link_supports_psrsu(struct dc_link *link)
            !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
                return false;
 
+       if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
+               return false;
+
        return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
 }
 
index 390e7a99be5410e53dfc3b321baaa9f0ec65140d..7991ae468f752c2e56d35d280795cb0e3758cde6 100644 (file)
@@ -34,8 +34,6 @@ DC_LIBS += dcn21
 DC_LIBS += dcn201
 DC_LIBS += dcn30
 DC_LIBS += dcn301
-DC_LIBS += dcn302
-DC_LIBS += dcn303
 DC_LIBS += dcn31
 DC_LIBS += dcn314
 DC_LIBS += dcn32
@@ -62,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
 include $(AMD_DC)
 
 DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o
+dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
 
 DISPLAY_CORE += dc_vm_helper.o
 
index e295a839ab4761337475e58ff0264115eded2830..1090d235086aca2f99af4c5f7f5483574b7813af 100644 (file)
@@ -103,7 +103,8 @@ void convert_float_matrix(
 
 static uint32_t find_gcd(uint32_t a, uint32_t b)
 {
-       uint32_t remainder = 0;
+       uint32_t remainder;
+
        while (b != 0) {
                remainder = a % b;
                a = b;
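
The find_gcd() change only drops a redundant initializer; the loop is plain Euclid. For reference, a worked run:

	uint32_t g = find_gcd(54, 24);
	/* (a,b): (54,24) -> (24,6) -> (6,0); returns 6. */
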
index fcd65a2057ad4722257d23dcdfa9b030a09ccd16..960c4b4f6ddf3670156abd99cc0a02aeb176c7dc 100644 (file)
@@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5(
                DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
                break;
        case AS_SIGNAL_TYPE_DISPLAY_PORT:
-               ss_info->spread_spectrum_percentage =
+               if (bp->base.integrated_info) {
+                       DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
+                       ss_info->spread_spectrum_percentage =
+                                       bp->base.integrated_info->gpuclk_ss_percentage;
+                       ss_info->type.CENTER_MODE =
+                                       bp->base.integrated_info->gpuclk_ss_type;
+               } else {
+                       ss_info->spread_spectrum_percentage =
                                disp_cntl_tbl->dp_ss_percentage;
-               ss_info->spread_spectrum_range =
+                       ss_info->spread_spectrum_range =
                                disp_cntl_tbl->dp_ss_rate_10hz * 10;
-               if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
-                       ss_info->type.CENTER_MODE = true;
-
+                       if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+                               ss_info->type.CENTER_MODE = true;
+               }
                DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
                break;
        case AS_SIGNAL_TYPE_GPU_PLL:
@@ -2813,6 +2820,8 @@ static enum bp_result get_integrated_info_v2_2(
        info->ma_channel_number = info_v2_2->umachannelnumber;
        info->dp_ss_control =
                le16_to_cpu(info_v2_2->reserved1);
+       info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
+       info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
 
        for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
                info->ext_disp_conn_info.gu_id[i] =
index ab0adabf9dd4c61f96642201d0557a82186a80dc..293a919d605d1657d3963ed292cd4c3115d3fb3f 100644 (file)
@@ -123,7 +123,7 @@ static void encoder_control_dmcub(
                sizeof(cmd.digx_encoder_control.header);
        cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
 
-       dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result encoder_control_digx_v1_5(
@@ -259,7 +259,7 @@ static void transmitter_control_dmcub(
                sizeof(cmd.dig1_transmitter_control.header);
        cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
 
-       dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result transmitter_control_v1_6(
@@ -321,7 +321,7 @@ static void transmitter_control_dmcub_v1_7(
                sizeof(cmd.dig1_transmitter_control.header);
        cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig;
 
-       dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result transmitter_control_v1_7(
@@ -429,7 +429,7 @@ static void set_pixel_clock_dmcub(
                sizeof(cmd.set_pixel_clock.header);
        cmd.set_pixel_clock.pixel_clock.clk = *clk;
 
-       dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result set_pixel_clock_v7(
@@ -796,7 +796,7 @@ static void enable_disp_power_gating_dmcub(
                sizeof(cmd.enable_disp_power_gating.header);
        cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
 
-       dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result enable_disp_power_gating_v2_1(
@@ -1006,7 +1006,7 @@ static void enable_lvtma_control_dmcub(
                        pwrseq_instance;
        cmd.lvtma_control.data.bypass_panel_control_wait =
                        bypass_panel_control_wait;
-       dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result enable_lvtma_control(
index 7dacb0f82d29b5c50daa73527d9b04f87960c5c3..28a2a837d2f0a72965eb8dbb9e5ec468721b2e27 100644 (file)
@@ -29,6 +29,7 @@
 #include "dc_types.h"
 #include "dccg.h"
 #include "clk_mgr_internal.h"
+#include "dc_state_priv.h"
 #include "link.h"
 
 #include "dce100/dce_clk_mgr.h"
@@ -63,7 +64,7 @@ int clk_mgr_helper_get_active_display_cnt(
                /* Don't count SubVP phantom pipes as part of active
                 * display count
                 */
-               if (stream->mall_stream_config.type == SUBVP_PHANTOM)
+               if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
                        continue;
 
                /*
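
This is one of many call sites in the series where SubVP classification moves off the stream (the old stream->mall_stream_config.type) into per-state queries from the new dc_state_priv.h, so the same stream can be phantom in one dc_state and regular in another. The two accessor shapes used throughout this diff:

	/* Stream-scoped query: */
	if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
		continue;       /* phantom streams don't count as active displays */

	/* Pipe-scoped variant: */
	if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
		continue;
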
index 3db4ef564b997a54880fcfff1aa28c7efa0d4b51..ce1386e22576ece836a16d78d39b14d217139e78 100644 (file)
@@ -253,7 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
        cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
        cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
index 7326b756584610b9c7d54f35925bb0ece88102b9..a84f1e376dee45f7fbefea37053c0df57074789a 100644 (file)
@@ -87,6 +87,20 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
 #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK     0x0000F000L
 #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK     0xFFFF0000L
 
+#define regCLK1_CLK2_BYPASS_CNTL                       0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX      0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT  0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT  0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK            0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK            0x000F0000L
+
+#define regCLK6_0_CLK6_spll_field_8                            0x464b
+#define regCLK6_0_CLK6_spll_field_8_BASE_IDX   0
+
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en__SHIFT   0xd
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en_MASK             0x00002000L
+
 #define REG(reg_name) \
        (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
 
@@ -131,35 +145,63 @@ static int dcn314_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+                                 bool safe_to_lower, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
 
        for (i = 0; i < dc->res_pool->pipe_count; ++i) {
-               struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe = safe_to_lower
+                       ? &context->res_ctx.pipe_ctx[i]
+                       : &dc->current_state->res_ctx.pipe_ctx[i];
 
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
                if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
-                       struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
-
                        if (disable) {
-                               if (stream_enc && stream_enc->funcs->disable_fifo)
-                                       pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+                               if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+                                       pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
 
-                               pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
                                reset_sync_context_for_pipe(dc, context, i);
                        } else {
                                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
-
-                               if (stream_enc && stream_enc->funcs->enable_fifo)
-                                       pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
                        }
                }
        }
 }
 
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+       struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+       uint32_t ssc_enable;
+
+       REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+       return ssc_enable == 1;
+}
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr)
+{
+       struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+       uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+       memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+       // Assumption is that boot state always supports pstate
+       clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;      // restore ref_dtbclk
+       clk_mgr->clks.p_state_change_support = true;
+       clk_mgr->clks.prev_p_state_change_support = true;
+       clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+       clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+       // Adjust the dp_dto reference clock when SSC is enabled; otherwise use dprefclk as-is.
+       if (dcn314_is_spll_ssc_enabled(clk_mgr))
+               clk_mgr->dp_dto_source_clock_in_khz =
+                       dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+       else
+               clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+}
+
 void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
                        struct dc_state *context,
                        bool safe_to_lower)
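
dcn314_init_clocks() now selects the DP DTO source clock based on whether the SPLL reports spread spectrum enabled; dcn314_read_ss_info_from_lut() further down supplies the spread parameters from the small ss_info_table. With the table values in this diff, and assuming dce_adjust_dp_ref_freq_for_ss() scales by (1 - percentage / divider / 100) (the helper's body is not in this diff), a worked example:

	/* ss_divider = 1000, ss_percentage[clock_source] = 375 => 0.375% spread.
	 * For a 600000 kHz DPREFCLK:
	 *   600000 * (1 - 375 / 1000.0 / 100) = 600000 * 0.99625 = 597750 kHz
	 * so dp_dto_source_clock_in_khz becomes 597750 rather than 600000. */
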
@@ -252,11 +294,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-               dcn314_disable_otg_wa(clk_mgr_base, context, true);
+               dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-               dcn314_disable_otg_wa(clk_mgr_base, context, false);
+               dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
 
                update_dispclk = true;
        }
@@ -284,7 +326,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
        cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
        cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -436,6 +478,11 @@ static DpmClocks314_t dummy_clocks;
 
 static struct dcn314_watermarks dummy_wms = { 0 };
 
+static struct dcn314_ss_info_table ss_info_table = {
+       .ss_divider = 1000,
+       .ss_percentage = {0, 0, 375, 375, 375}
+};
+
 static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
 {
        int i, num_valid_sets;
@@ -708,13 +755,31 @@ static struct clk_mgr_funcs dcn314_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
        .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
        .update_clocks = dcn314_update_clocks,
-       .init_clocks = dcn31_init_clocks,
+       .init_clocks = dcn314_init_clocks,
        .enable_pme_wa = dcn314_enable_pme_wa,
        .are_clock_states_equal = dcn314_are_clock_states_equal,
        .notify_wm_ranges = dcn314_notify_wm_ranges
 };
 extern struct clk_mgr_funcs dcn3_fpga_funcs;
 
+static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+       uint32_t clock_source;
+       //uint32_t ssc_enable;
+
+       REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+       //REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+       if (dcn314_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+               clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+               if (clk_mgr->dprefclk_ss_percentage != 0) {
+                       clk_mgr->ss_on_dprefclk = true;
+                       clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+               }
+       }
+}
+
 void dcn314_clk_mgr_construct(
                struct dc_context *ctx,
                struct clk_mgr_dcn314 *clk_mgr,
@@ -782,6 +847,7 @@ void dcn314_clk_mgr_construct(
        clk_mgr->base.base.dprefclk_khz = 600000;
        clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
        dce_clock_read_ss_info(&clk_mgr->base);
+       dcn314_read_ss_info_from_lut(&clk_mgr->base);
        /* If BIOS enabled SS, the driver needs to adjust the DTB clock; only enable with correct BIOS. */
 
        clk_mgr->base.base.bw_params = &dcn314_bw_params;
index 171f84340eb2fb1d532776ac348cc1fbfad858f5..002c28e807208e584396fdc99dc1822072e8ffa5 100644 (file)
@@ -28,6 +28,8 @@
 #define __DCN314_CLK_MGR_H__
 #include "clk_mgr_internal.h"
 
+#define DCN314_NUM_CLOCK_SOURCES   5
+
 struct dcn314_watermarks;
 
 struct dcn314_smu_watermark_set {
@@ -40,9 +42,18 @@ struct clk_mgr_dcn314 {
        struct dcn314_smu_watermark_set smu_wm_set;
 };
 
+struct dcn314_ss_info_table {
+       uint32_t ss_divider;
+       uint32_t ss_percentage[DCN314_NUM_CLOCK_SOURCES];
+};
+
 bool dcn314_are_clock_states_equal(struct dc_clocks *a,
                struct dc_clocks *b);
 
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
 void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
                        struct dc_state *context,
                        bool safe_to_lower);
index 8776055bbeaaea292ac99e162e2b5fc839f9248d..644da463732093f9d3798b3de9565b5f7fd9ea0b 100644 (file)
@@ -232,7 +232,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
        cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
        cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
index 09151cc56ce4f2ca70d9af12c5c1d3ff7340ee3f..12f3e8aa46d8dfae21b5dd1e9f4ef167ee314f2d 100644 (file)
@@ -239,7 +239,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
        cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
        cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
index 95c0b49b531a082e550ab6ac39e6a3b19fc35332..aadd07bc68c5dba969fb4f7e158ba1ea990c7b5a 100644 (file)
@@ -25,7 +25,6 @@
 
 #include "dccg.h"
 #include "clk_mgr_internal.h"
-
 #include "dcn32/dcn32_clk_mgr_smu_msg.h"
 #include "dcn20/dcn20_clk_mgr.h"
 #include "dce100/dce_clk_mgr.h"
@@ -34,7 +33,7 @@
 #include "core_types.h"
 #include "dm_helpers.h"
 #include "link.h"
-
+#include "dc_state_priv.h"
 #include "atomfirmware.h"
 #include "smu13_driver_if.h"
 
@@ -458,13 +457,43 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
        return 0;
 }
 
-static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr)
+static bool dcn32_check_native_scaling(struct pipe_ctx *pipe)
+{
+       bool is_native_scaling = false;
+       int width = pipe->plane_state->src_rect.width;
+       int height = pipe->plane_state->src_rect.height;
+
+       if (pipe->stream->timing.h_addressable == width &&
+                       pipe->stream->timing.v_addressable == height &&
+                       pipe->plane_state->dst_rect.width == width &&
+                       pipe->plane_state->dst_rect.height == height)
+               is_native_scaling = true;
+
+       return is_native_scaling;
+}
+
+static void dcn32_auto_dpm_test_log(
+               struct dc_clocks *new_clocks,
+               struct clk_mgr_internal *clk_mgr,
+               struct dc_state *context)
 {
        unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg,
-                                fclk_khz_reg;
+                                fclk_khz_reg, mall_ss_size_bytes;
        int dramclk_khz_override, fclk_khz_override, num_fclk_levels;
 
-       msleep(5);
+       struct pipe_ctx *pipe_ctx_list[MAX_PIPES];
+       int active_pipe_count = 0;
+
+       for (int i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
+                       pipe_ctx_list[active_pipe_count] = pipe_ctx;
+                       active_pipe_count++;
+               }
+       }
+
+       mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
 
     dispclk_khz_reg    = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
     dppclk_khz_reg     = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
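
dcn32_check_native_scaling() reports whether a plane is presented 1:1 against the stream timing. A quick illustration (made-up numbers):

	bool scaled = !dcn32_check_native_scaling(pipe);
	/* src_rect 1920x1080, dst_rect 1920x1080, timing 1920x1080 addressable
	 * -> native, scaled == false; dst_rect 3840x2160 -> scaled == true. */
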
@@ -494,16 +523,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
        //
        //                              AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
        ////////////////////////////////////////////////////////////////////////////
-       if (new_clocks &&
+       if (new_clocks && active_pipe_count > 0 &&
                new_clocks->dramclk_khz > 0 &&
                new_clocks->fclk_khz > 0 &&
                new_clocks->dcfclk_khz > 0 &&
                new_clocks->dppclk_khz > 0) {
 
+               uint32_t pix_clk_list[MAX_PIPES] = {0};
+               int p_state_list[MAX_PIPES] = {0};
+               int disp_src_width_list[MAX_PIPES] = {0};
+               int disp_src_height_list[MAX_PIPES] = {0};
+               uint64_t disp_src_refresh_list[MAX_PIPES] = {0};
+               bool is_scaled_list[MAX_PIPES] = {0};
+
+               for (int i = 0; i < active_pipe_count; i++) {
+                       struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i];
+                       uint64_t refresh_rate;
+
+                       pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz;
+                       p_state_list[i] = curr_pipe_ctx->p_state_type;
+
+                       refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 +
+                               curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - (uint64_t)1);
+                       refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total);
+                       refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total);
+                       disp_src_refresh_list[i] = refresh_rate;
+
+                       if (curr_pipe_ctx->plane_state) {
+                               is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx));
+                               disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width;
+                               disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height;
+                       }
+               }
+
                DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
                        "dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
                        "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
-                       "dtbclk_hw:%d - fclk_hw:%d\n",
+                       "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - "
+                       "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - "
+                       "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - "
+                       "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - "
+                       "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - "
+                       "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - "
+                       "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n",
                        dramclk_khz_override,
                        fclk_khz_override,
                        new_clocks->dcfclk_khz,
@@ -513,7 +575,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
                        dprefclk_khz_reg,
                        dcfclk_khz_reg,
                        dtbclk_khz_reg,
-                       fclk_khz_reg);
+                       fclk_khz_reg,
+                       pix_clk_list[0], pix_clk_list[1], pix_clk_list[2], pix_clk_list[3],
+                       mall_ss_size_bytes,
+                       p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3],
+                       disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0],
+                       disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1],
+                       disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2],
+                       disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]);
        }
 }
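
The per-pipe refresh rate logged above is computed in Hz with a round-up: (pix_clk_100hz * 100 + v_total * h_total - 1) is divided by v_total and then h_total via div_u64(). Worked example with standard 1080p60 CEA timing:

	/* pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200, v_total = 1125:
	 *   n = 1485000 * 100 + 2200 * 1125 - 1 = 150974999
	 *   div_u64(n, 1125)      = 134199
	 *   div_u64(134199, 2200) = 60     -> refresh_rate_0 logs 60 Hz
	 * The "+ v_total * h_total - 1" biases an almost-integer rate upward
	 * instead of truncating it down. */
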
 
@@ -686,6 +755,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
                /* DCCG requires KHz precision for DTBCLK */
                clk_mgr_base->clks.ref_dtbclk_khz =
                                dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+
                dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
        }
 
@@ -713,8 +783,8 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
                dmcu->funcs->set_psr_wait_loop(dmcu,
                                clk_mgr_base->clks.dispclk_khz / 1000 / 7);
 
-       if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {
-           dcn32_auto_dpm_test_log(new_clocks, clk_mgr);
+       if (dc->config.enable_auto_dpm_test_logs) {
+           dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
        }
 }
 
index 8d4c0b209872babcafc72f31a1ed7ef0293ff2e7..9c660d1facc7699d7a1b3f90292ae31d985fd259 100644 (file)
@@ -50,6 +50,7 @@
 #include "dc_dmub_srv.h"
 #include "link.h"
 #include "logger_types.h"
+
 #undef DC_LOGGER
 #define DC_LOGGER \
        clk_mgr->base.base.ctx->logger
@@ -342,7 +343,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
        cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
        cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -417,9 +418,8 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
 }
 
 static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
-               struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+               struct clk_mgr_dcn35 *clk_mgr)
 {
-
 }
 
 static struct clk_bw_params dcn35_bw_params = {
@@ -986,7 +986,6 @@ void dcn35_clk_mgr_construct(
                struct dccg *dccg)
 {
        struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
-       struct clk_log_info log_info = {0};
        clk_mgr->base.base.ctx = ctx;
        clk_mgr->base.base.funcs = &dcn35_funcs;
 
@@ -1039,7 +1038,7 @@ void dcn35_clk_mgr_construct(
                dcn35_bw_params.wm_table = ddr5_wm_table;
        }
        /* Saved clocks configured at boot for debug purposes */
-       dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+       dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
 
        clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
        clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
index 9d3925603979e80e5db1c64190d688696ed7295a..aa7c02ba948e9ce63aa84eb7518f9c73c80d107a 100644 (file)
@@ -34,6 +34,8 @@
 #include "dce/dce_hwseq.h"
 
 #include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
 
 #include "gpio_service_interface.h"
 #include "clk_mgr.h"
@@ -519,7 +521,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
                cmd.secure_display.roi_info.y_end = rect->y + rect->height;
        }
 
-       dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 }
 
 static inline void
@@ -808,7 +810,7 @@ static void dc_destruct(struct dc *dc)
                link_enc_cfg_init(dc, dc->current_state);
 
        if (dc->current_state) {
-               dc_release_state(dc->current_state);
+               dc_state_release(dc->current_state);
                dc->current_state = NULL;
        }
 
@@ -1020,29 +1022,27 @@ static bool dc_construct(struct dc *dc,
        }
 #endif
 
+       if (!create_links(dc, init_params->num_virtual_links))
+               goto fail;
+
+       /* Create additional DIG link encoder objects if fewer than the platform
+        * supports were created during link construction.
+        */
+       if (!create_link_encoders(dc))
+               goto fail;
+
        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */
 
-       dc->current_state = dc_create_state(dc);
+       dc->current_state = dc_state_create(dc);
 
        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }
 
-       if (!create_links(dc, init_params->num_virtual_links))
-               goto fail;
-
-       /* Create additional DIG link encoder objects if fewer than the platform
-        * supports were created during link construction.
-        */
-       if (!create_link_encoders(dc))
-               goto fail;
-
-       dc_resource_state_construct(dc, dc->current_state);
-
        return true;
 
 fail:
@@ -1085,7 +1085,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
        }
 }
 
-static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
 {
        if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
                memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1105,9 +1105,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
                        if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
                                get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
-                               get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+                               get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
-                               get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+                               get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
                }
        }
 }
@@ -1115,7 +1115,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 {
        int i, j;
-       struct dc_state *dangling_context = dc_create_state(dc);
+       struct dc_state *dangling_context = dc_state_create_current_copy(dc);
        struct dc_state *current_ctx;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;
@@ -1123,8 +1123,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
        if (dangling_context == NULL)
                return;
 
-       dc_resource_state_copy_construct(dc->current_state, dangling_context);
-
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
@@ -1161,6 +1159,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
                }
 
                if (should_disable && old_stream) {
+                       bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        tg = pipe->stream_res.tg;
                        /* When disabling plane for a phantom pipe, we must turn on the
@@ -1169,22 +1168,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
                         * state that can result in underflow or hang when enabling it
                         * again for different use.
                         */
-                       if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+                       if (is_phantom) {
                                if (tg->funcs->enable_crtc) {
                                        int main_pipe_width, main_pipe_height;
+                                       struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
 
-                                       main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
-                                       main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+                                       main_pipe_width = old_paired_stream->dst.width;
+                                       main_pipe_height = old_paired_stream->dst.height;
                                        if (dc->hwss.blank_phantom)
                                                dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
                                        tg->funcs->enable_crtc(tg);
                                }
                        }
-                       dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+
+                       if (is_phantom)
+                               dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+                       else
+                               dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
 
-                       if (pipe->stream && pipe->plane_state)
-                               dc_update_viusal_confirm_color(dc, context, pipe);
+                       if (pipe->stream && pipe->plane_state) {
+                               set_p_state_switch_method(dc, context, pipe);
+                               dc_update_visual_confirm_color(dc, context, pipe);
+                       }
 
                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
@@ -1203,7 +1209,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
                         * The OTG is set to disable on falling edge of VUPDATE so the plane disable
                         * will still get it's double buffer update.
                         */
-                       if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+                       if (is_phantom) {
                                if (tg->funcs->disable_phantom_crtc)
                                        tg->funcs->disable_phantom_crtc(tg);
                        }
@@ -1212,7 +1218,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 
        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
-       dc_release_state(current_ctx);
+       dc_state_release(current_ctx);
 }
 
 static void disable_vbios_mode_if_required(
@@ -1284,7 +1290,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
                int count = 0;
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-               if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+               if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
                        continue;
 
                /* Timeout 100 ms */
@@ -1510,7 +1516,7 @@ static void program_timing_sync(
                }
 
                for (k = 0; k < group_size; k++) {
-                       struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
+                       struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
 
                        status->timing_sync_info.group_id = num_group;
                        status->timing_sync_info.group_size = group_size;
@@ -1555,7 +1561,7 @@ static void program_timing_sync(
                if (group_size > 1) {
                        if (sync_type == TIMING_SYNCHRONIZABLE) {
                                dc->hwss.enable_timing_synchronization(
-                                       dc, group_index, group_size, pipe_set);
+                                       dc, ctx, group_index, group_size, pipe_set);
                        } else
                                if (sync_type == VBLANK_SYNCHRONIZABLE) {
                                dc->hwss.enable_vblanks_synchronization(
@@ -1837,7 +1843,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
                struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
                /* Check old context for SubVP */
-               subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+               subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
                if (subvp_prev_use)
                        break;
        }
@@ -1995,9 +2001,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        old_state = dc->current_state;
        dc->current_state = context;
 
-       dc_release_state(old_state);
+       dc_state_release(old_state);
 
-       dc_retain_state(dc->current_state);
+       dc_state_retain(dc->current_state);
 
        return result;
 }
@@ -2068,12 +2074,10 @@ enum dc_status dc_commit_streams(struct dc *dc,
        if (handle_exit_odm2to1)
                res = commit_minimal_transition_state(dc, dc->current_state);
 
-       context = dc_create_state(dc);
+       context = dc_state_create_current_copy(dc);
        if (!context)
                goto context_alloc_fail;
 
-       dc_resource_state_copy_construct_current(dc, context);
-
        res = dc_validate_with_context(dc, set, stream_count, context, false);
        if (res != DC_OK) {
                BREAK_TO_DEBUGGER();
@@ -2088,7 +2092,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
                                streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
 
                        if (dc_is_embedded_signal(streams[i]->signal)) {
-                               struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+                               struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
 
                                if (dc->hwss.is_abm_supported)
                                        status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
@@ -2099,7 +2103,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
        }
 
 fail:
-       dc_release_state(context);
+       dc_state_release(context);
 
 context_alloc_fail:
 
@@ -2153,7 +2157,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
                pipe = &context->res_ctx.pipe_ctx[i];
 
                // Don't check flip pending on phantom pipes
-               if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
+               if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
                        continue;
 
                /* Must set to false to start with, due to OR in update function */
@@ -2211,7 +2215,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
                        if (context->res_ctx.pipe_ctx[i].stream == NULL ||
                                        context->res_ctx.pipe_ctx[i].plane_state == NULL) {
                                context->res_ctx.pipe_ctx[i].pipe_idx = i;
-                               dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+                               dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
                        }
 
                process_deferred_updates(dc);
@@ -2226,103 +2230,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
        dc->wm_optimized_required = false;
 }
 
-static void init_state(struct dc *dc, struct dc_state *context)
-{
-       /* Each context must have their own instance of VBA and in order to
-        * initialize and obtain IP and SOC the base DML instance from DC is
-        * initially copied into every context
-        */
-       memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
-}
-
-struct dc_state *dc_create_state(struct dc *dc)
-{
-       struct dc_state *context = kvzalloc(sizeof(struct dc_state),
-                                           GFP_KERNEL);
-
-       if (!context)
-               return NULL;
-
-       init_state(dc, context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-       if (dc->debug.using_dml2) {
-               dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
-       }
-#endif
-       kref_init(&context->refcount);
-
-       return context;
-}
-
-struct dc_state *dc_copy_state(struct dc_state *src_ctx)
-{
-       int i, j;
-       struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
-
-       if (!new_ctx)
-               return NULL;
-       memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-       if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) {
-               dc_release_state(new_ctx);
-               return NULL;
-       }
-#endif
-
-       for (i = 0; i < MAX_PIPES; i++) {
-                       struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
-
-                       if (cur_pipe->top_pipe)
-                               cur_pipe->top_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
-                       if (cur_pipe->bottom_pipe)
-                               cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
-                       if (cur_pipe->prev_odm_pipe)
-                               cur_pipe->prev_odm_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-
-                       if (cur_pipe->next_odm_pipe)
-                               cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
-       }
-
-       for (i = 0; i < new_ctx->stream_count; i++) {
-                       dc_stream_retain(new_ctx->streams[i]);
-                       for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
-                               dc_plane_state_retain(
-                                       new_ctx->stream_status[i].plane_states[j]);
-       }
-
-       kref_init(&new_ctx->refcount);
-
-       return new_ctx;
-}
-
-void dc_retain_state(struct dc_state *context)
-{
-       kref_get(&context->refcount);
-}
-
-static void dc_state_free(struct kref *kref)
-{
-       struct dc_state *context = container_of(kref, struct dc_state, refcount);
-       dc_resource_state_destruct(context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-       dml2_destroy(context->bw_ctx.dml2);
-       context->bw_ctx.dml2 = 0;
-#endif
-
-       kvfree(context);
-}
-
-void dc_release_state(struct dc_state *context)
-{
-       kref_put(&context->refcount, dc_state_free);
-}
-
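
For review convenience, the legacy helpers deleted above map one-for-one onto the new dc_state API implemented in the dc_state.c file added at the end of this diff; the kref semantics are unchanged. One caveat: the dc_state_create() name does not appear in this excerpt and is an assumption.

	/*
	 * legacy (dc.c / dc_resource.c)              new (dc_state.c)
	 * dc_create_state(dc)                     -> dc_state_create(dc)   [assumed]
	 * dc_copy_state(src)                      -> dc_state_create_copy(src)
	 * dc_retain_state(ctx)                    -> dc_state_retain(ctx)
	 * dc_release_state(ctx)                   -> dc_state_release(ctx)
	 * dc_resource_state_copy_construct_current(dc, ctx)
	 *                                         -> dc_state_create_current_copy(dc)
	 * dc_resource_state_construct(dc, ctx)    -> dc_state_construct(dc, ctx)
	 * dc_resource_state_destruct(ctx)         -> dc_state_destruct(ctx)
	 */
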
 bool dc_set_generic_gpio_for_stereo(bool enable,
                struct gpio_service *gpio_service)
 {
@@ -2995,11 +2902,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
                                       update->dsc_config->num_slices_v != 0);
 
                /* Use temporary context for validating new DSC config */

-               struct dc_state *dsc_validate_context = dc_create_state(dc);
+               struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
 
                if (dsc_validate_context) {
-                       dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
-
                        stream->timing.dsc_cfg = *update->dsc_config;
                        stream->timing.flags.DSC = enable_dsc;
                        if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
@@ -3008,7 +2913,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
                                update->dsc_config = NULL;
                        }
 
-                       dc_release_state(dsc_validate_context);
+                       dc_state_release(dsc_validate_context);
                } else {
                        DC_ERROR("Failed to allocate new validate context for DSC change\n");
                        update->dsc_config = NULL;
@@ -3107,30 +3012,27 @@ static bool update_planes_and_stream_state(struct dc *dc,
                        new_planes[i] = srf_updates[i].surface;
 
                /* initialize scratch memory for building context */
-               context = dc_create_state(dc);
+               context = dc_state_create_copy(dc->current_state);
                if (context == NULL) {
                        DC_ERROR("Failed to allocate new validate context!\n");
                        return false;
                }
 
-               dc_resource_state_copy_construct(
-                               dc->current_state, context);
-
                /* For each full update, remove all existing phantom pipes first.
                 * Ensures that we have enough pipes for newly added MPO planes
                 */
-               if (dc->res_pool->funcs->remove_phantom_pipes)
-                       dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+               dc_state_remove_phantom_streams_and_planes(dc, context);
+               dc_state_release_phantom_streams_and_planes(dc, context);
 
                /* remove old surfaces from context */
-               if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+               if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
 
                        BREAK_TO_DEBUGGER();
                        goto fail;
                }
 
                /* add surface to context */
-               if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+               if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
 
                        BREAK_TO_DEBUGGER();
                        goto fail;
@@ -3155,19 +3057,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
 
        if (update_type == UPDATE_TYPE_FULL) {
                if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
-                       /* For phantom pipes we remove and create a new set of phantom pipes
-                        * for each full update (because we don't know if we'll need phantom
-                        * pipes until after the first round of validation). However, if validation
-                        * fails we need to keep the existing phantom pipes (because we don't update
-                        * the dc->current_state).
-                        *
-                        * The phantom stream/plane refcount is decremented for validation because
-                        * we assume it'll be removed (the free comes when the dc_state is freed),
-                        * but if validation fails we have to increment back the refcount so it's
-                        * consistent.
-                        */
-                       if (dc->res_pool->funcs->retain_phantom_pipes)
-                               dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
                        BREAK_TO_DEBUGGER();
                        goto fail;
                }
@@ -3188,7 +3077,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
        return true;
 
 fail:
-       dc_release_state(context);
+       dc_state_release(context);
 
        return false;
 
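
The full-update path above now handles phantom pipes through dc_state helpers instead of res_pool function-pointer hooks, and the failure path no longer needs the retain_phantom_pipes fix-up: the remove/release split suggests phantom streams and planes are now refcounted by the state itself, so releasing the context copy releases them too. A condensed sketch of the sequence (error handling trimmed):

	/* Condensed from update_planes_and_stream_state() above. */
	context = dc_state_create_copy(dc->current_state);
	if (!context)
		return false;

	/* detach phantom streams/planes from the copy, then drop the
	 * references the copy held on them */
	dc_state_remove_phantom_streams_and_planes(dc, context);
	dc_state_release_phantom_streams_and_planes(dc, context);

	/* swap the plane set for this stream */
	if (!dc_state_rem_all_planes_for_stream(dc, stream, context) ||
	    !dc_state_add_all_planes_for_stream(dc, stream, new_planes,
						surface_count, context))
		goto fail; /* fail path: dc_state_release(context) */
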
@@ -3384,7 +3273,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
 
                        update_dirty_rect->panel_inst = panel_inst;
                        update_dirty_rect->pipe_idx = j;
-                       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+                       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
                }
        }
 }
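
All DMUB command submissions in this file move from dm_execute_dmub_cmd() to dc_wake_and_execute_dmub_cmd(). Judging by the name and the DMCUB idle-power work this series belongs to, the wrapper ensures the firmware is awake before the command is queued and lets it re-enter idle afterwards; the sketch below only shows the assumed shape, not the real implementation in dc_dmub_srv.c:

	/* Assumed structure of the wake-wrapped submit helper. */
	static bool wake_and_execute_sketch(const struct dc_context *ctx,
			union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
	{
		bool res;

		/* 1) bring DMCUB out of its low-power state (details not shown here) */
		/* 2) submit exactly as before */
		res = dm_execute_dmub_cmd(ctx, cmd, wait_type);
		/* 3) permit DMCUB to go idle again */

		return res;
	}
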
@@ -3486,18 +3375,24 @@ static void commit_planes_for_stream_fast(struct dc *dc,
 {
        int i, j;
        struct pipe_ctx *top_pipe_to_program = NULL;
+       struct dc_stream_status *stream_status = NULL;
        dc_z10_restore(dc);
 
        top_pipe_to_program = resource_get_otg_master_for_stream(
                        &context->res_ctx,
                        stream);
 
-       if (dc->debug.visual_confirm) {
-               for (i = 0; i < dc->res_pool->pipe_count; i++) {
-                       struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+       if (!top_pipe_to_program)
+               return;
 
-                       if (pipe->stream && pipe->plane_state)
-                               dc_update_viusal_confirm_color(dc, context, pipe);
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe->stream && pipe->plane_state) {
+                       set_p_state_switch_method(dc, context, pipe);
+
+                       if (dc->debug.visual_confirm)
+                               dc_update_visual_confirm_color(dc, context, pipe);
                }
        }
 
@@ -3521,6 +3416,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
                }
        }
 
+       stream_status = dc_state_get_stream_status(context, stream);
+
        build_dmub_cmd_list(dc,
                        srf_updates,
                        surface_count,
@@ -3533,7 +3430,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
                        context->dmub_cmd_count,
                        context->block_sequence,
                        &(context->block_sequence_steps),
-                       top_pipe_to_program);
+                       top_pipe_to_program,
+                       stream_status);
        hwss_execute_sequence(dc,
                        context->block_sequence,
                        context->block_sequence_steps);
@@ -3624,12 +3522,12 @@ static void commit_planes_for_stream(struct dc *dc,
        top_pipe_to_program = resource_get_otg_master_for_stream(
                                &context->res_ctx,
                                stream);
-
+       ASSERT(top_pipe_to_program != NULL);
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
                // Check old context for SubVP
-               subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+               subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
                if (subvp_prev_use)
                        break;
        }
@@ -3637,19 +3535,22 @@ static void commit_planes_for_stream(struct dc *dc,
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-               if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+               if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
                        subvp_curr_use = true;
                        break;
                }
        }
 
-       if (dc->debug.visual_confirm)
-               for (i = 0; i < dc->res_pool->pipe_count; i++) {
-                       struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe->stream && pipe->plane_state) {
+                       set_p_state_switch_method(dc, context, pipe);
 
-                       if (pipe->stream && pipe->plane_state)
-                               dc_update_viusal_confirm_color(dc, context, pipe);
+                       if (dc->debug.visual_confirm)
+                               dc_update_visual_confirm_color(dc, context, pipe);
                }
+       }
 
        if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
                struct pipe_ctx *mpcc_pipe;
@@ -4022,7 +3923,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
-               if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+               if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
                        subvp_active = true;
                        break;
                }
@@ -4059,7 +3960,7 @@ struct pipe_split_policy_backup {
 static void release_minimal_transition_state(struct dc *dc,
                struct dc_state *context, struct pipe_split_policy_backup *policy)
 {
-       dc_release_state(context);
+       dc_state_release(context);
        /* restore previous pipe split and odm policy */
        if (!dc->config.is_vmin_only_asic)
                dc->debug.pipe_split_policy = policy->mpc_policy;
@@ -4070,7 +3971,7 @@ static void release_minimal_transition_state(struct dc *dc,
 static struct dc_state *create_minimal_transition_state(struct dc *dc,
                struct dc_state *base_context, struct pipe_split_policy_backup *policy)
 {
-       struct dc_state *minimal_transition_context = dc_create_state(dc);
+       struct dc_state *minimal_transition_context = NULL;
        unsigned int i, j;
 
        if (!dc->config.is_vmin_only_asic) {
@@ -4082,7 +3983,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
        policy->subvp_policy = dc->debug.force_disable_subvp;
        dc->debug.force_disable_subvp = true;
 
-       dc_resource_state_copy_construct(base_context, minimal_transition_context);
+       minimal_transition_context = dc_state_create_copy(base_context);
+       if (!minimal_transition_context)
+               return NULL;
 
        /* commit minimal state */
        if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
@@ -4114,7 +4017,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
        bool success = false;
        struct dc_state *minimal_transition_context;
        struct pipe_split_policy_backup policy;
-       struct mall_temp_config mall_temp_config;
 
        /* commit based on new context */
        /* Since all phantom pipes are removed in full validation,
@@ -4123,8 +4025,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
         * pipe as subvp/phantom will be cleared (dc copy constructor
         * creates a shallow copy).
         */
-       if (dc->res_pool->funcs->save_mall_state)
-               dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
        minimal_transition_context = create_minimal_transition_state(dc,
                        context, &policy);
        if (minimal_transition_context) {
@@ -4137,16 +4037,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
                        success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
                }
                release_minimal_transition_state(dc, minimal_transition_context, &policy);
-               if (dc->res_pool->funcs->restore_mall_state)
-                       dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-               /* If we do a minimal transition with plane removal and the context
-                * has subvp we also have to retain back the phantom stream / planes
-                * since the refcount is decremented as part of the min transition
-                * (we commit a state with no subvp, so the phantom streams / planes
-                * had to be removed).
-                */
-               if (dc->res_pool->funcs->retain_phantom_pipes)
-                       dc->res_pool->funcs->retain_phantom_pipes(dc, context);
        }
 
        if (!success) {
@@ -4214,7 +4104,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
-               if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+               if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
                        subvp_in_use = true;
                        break;
                }
@@ -4455,6 +4345,8 @@ static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
 
        cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
        new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+       if (!cur_pipe || !new_pipe)
+               return false;
        cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
        new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
        if (cur_is_odm_in_use == new_is_odm_in_use)
@@ -4480,7 +4372,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
        struct dc_state *context;
        enum surface_update_type update_type;
        int i;
-       struct mall_temp_config mall_temp_config;
        struct dc_fast_update fast_update[MAX_SURFACES] = {0};
 
        /* In cases where MPO and split or ODM are used transitions can
@@ -4524,23 +4415,10 @@ bool dc_update_planes_and_stream(struct dc *dc,
                 * pipe as subvp/phantom will be cleared (dc copy constructor
                 * creates a shallow copy).
                 */
-               if (dc->res_pool->funcs->save_mall_state)
-                       dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
                if (!commit_minimal_transition_state(dc, context)) {
-                       dc_release_state(context);
+                       dc_state_release(context);
                        return false;
                }
-               if (dc->res_pool->funcs->restore_mall_state)
-                       dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-
-               /* If we do a minimal transition with plane removal and the context
-                * has subvp we also have to retain back the phantom stream / planes
-                * since the refcount is decremented as part of the min transition
-                * (we commit a state with no subvp, so the phantom streams / planes
-                * had to be removed).
-                */
-               if (dc->res_pool->funcs->retain_phantom_pipes)
-                       dc->res_pool->funcs->retain_phantom_pipes(dc, context);
                update_type = UPDATE_TYPE_FULL;
        }
 
@@ -4597,7 +4475,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
                struct dc_state *old = dc->current_state;
 
                dc->current_state = context;
-               dc_release_state(old);
+               dc_state_release(old);
 
                // clear any forced full updates
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4656,14 +4534,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
        if (update_type >= UPDATE_TYPE_FULL) {
 
                /* initialize scratch memory for building context */
-               context = dc_create_state(dc);
+               context = dc_state_create_copy(state);
                if (context == NULL) {
                        DC_ERROR("Failed to allocate new validate context!\n");
                        return;
                }
 
-               dc_resource_state_copy_construct(state, context);
-
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4702,7 +4578,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
        if (update_type >= UPDATE_TYPE_FULL) {
                if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
                        DC_ERROR("Mode validation failed for stream update!\n");
-                       dc_release_state(context);
+                       dc_state_release(context);
                        return;
                }
        }
@@ -4735,7 +4611,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
                struct dc_state *old = dc->current_state;
 
                dc->current_state = context;
-               dc_release_state(old);
+               dc_state_release(old);
 
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -4808,7 +4684,7 @@ void dc_set_power_state(
 
        switch (power_state) {
        case DC_ACPI_CM_POWER_STATE_D0:
-               dc_resource_state_construct(dc, dc->current_state);
+               dc_state_construct(dc, dc->current_state);
 
                dc_z10_restore(dc);
 
@@ -4823,7 +4699,7 @@ void dc_set_power_state(
        default:
                ASSERT(dc->current_state->stream_count == 0);
 
-               dc_resource_state_destruct(dc->current_state);
+               dc_state_destruct(dc->current_state);
 
                break;
        }
@@ -4900,6 +4776,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
        return true;
 }
 
+/* enable/disable eDP Replay without specifying a stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+       int i;
+       bool allow_active;
+
+       for (i = 0; i < dc->current_state->stream_count; i++) {
+               struct dc_link *link;
+               struct dc_stream_state *stream = dc->current_state->streams[i];
+
+               link = stream->link;
+               if (!link)
+                       continue;
+
+               if (link->replay_settings.replay_feature_enabled) {
+                       if (active && !link->replay_settings.replay_allow_active) {
+                               allow_active = true;
+                               if (!dc_link_set_replay_allow_active(link, &allow_active,
+                                       false, false, NULL))
+                                       return false;
+                       } else if (!active && link->replay_settings.replay_allow_active) {
+                               allow_active = false;
+                               if (!dc_link_set_replay_allow_active(link, &allow_active,
+                                       true, false, NULL))
+                                       return false;
+                       }
+               }
+       }
+
+       return true;
+}
+
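
dc_set_replay_allow_active() deliberately mirrors dc_set_psr_allow_active() just above it: it walks every committed stream and pauses or resumes Panel Replay on links that have the feature enabled. A hypothetical DM-side caller, for illustration only:

	/* Hypothetical usage: pause Replay on all links around work that
	 * conflicts with it, then resume.
	 */
	static void with_replay_paused(struct dc *dc)
	{
		if (!dc_set_replay_allow_active(dc, false))
			return;

		/* ... Replay-sensitive work ... */

		dc_set_replay_allow_active(dc, true);
	}
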
 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
 {
        if (dc->debug.disable_idle_power_optimizations)
@@ -5093,18 +5001,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
  */
 bool dc_is_dmub_outbox_supported(struct dc *dc)
 {
-       /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
-       if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
-           dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
-           !dc->debug.dpia_debug.bits.disable_dpia)
-               return true;
+       switch (dc->ctx->asic_id.chip_family) {
 
-       if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
-           !dc->debug.dpia_debug.bits.disable_dpia)
-               return true;
+       case FAMILY_YELLOW_CARP:
+               /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
+               if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+                   !dc->debug.dpia_debug.bits.disable_dpia)
+                       return true;
+               break;
+
+       case AMDGPU_FAMILY_GC_11_0_1:
+       case AMDGPU_FAMILY_GC_11_5_0:
+               if (!dc->debug.dpia_debug.bits.disable_dpia)
+                       return true;
+               break;
+
+       default:
+               break;
+       }
 
        /* dmub aux needs dmub notifications to be enabled */
        return dc->debug.enable_dmub_aux_for_legacy_ddc;
 }
 
 /**
@@ -5201,7 +5119,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
                        );
        }
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -5255,7 +5173,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
        cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
        cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
 
-       if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+       if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
                /* command is not processed by dmub */
                notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
                return is_cmd_complete;
@@ -5298,7 +5216,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
        cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
        cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
 
-       if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+       if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
                /* command is not processed by dmub */
                return DC_ERROR_UNEXPECTED;
 
@@ -5336,7 +5254,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
        cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
        cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
 }
@@ -5435,6 +5353,8 @@ bool dc_abm_save_restore(
        struct dc_link *link = stream->sink->link;
        struct dc_link *edp_links[MAX_NUM_EDP];
 
+       if (link->replay_settings.replay_feature_enabled)
+               return false;
 
        /*find primary pipe associated with stream*/
        for (i = 0; i < MAX_PIPES; i++) {
index fe07160932d6960ad88cc28c24bc06675b16f019..9c05b1a07142fe70df4184299fe1f18a2411ce6b 100644 (file)
@@ -31,6 +31,7 @@
 #include "basics/dc_common.h"
 #include "resource.h"
 #include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
 
 #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
 
@@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color(
 }
 
 void get_subvp_visual_confirm_color(
-               struct dc *dc,
-               struct dc_state *context,
                struct pipe_ctx *pipe_ctx,
                struct tg_color *color)
 {
        uint32_t color_value = MAX_TG_COLOR_VALUE;
-       bool enable_subvp = false;
-       int i;
-
-       if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context)
-               return;
+       if (pipe_ctx) {
+               switch (pipe_ctx->p_state_type) {
+               case P_STATE_SUB_VP:
+                       color->color_r_cr = color_value;
+                       color->color_g_y  = 0;
+                       color->color_b_cb = 0;
+                       break;
+               case P_STATE_DRR_SUB_VP:
+                       color->color_r_cr = 0;
+                       color->color_g_y  = color_value;
+                       color->color_b_cb = 0;
+                       break;
+               case P_STATE_V_BLANK_SUB_VP:
+                       color->color_r_cr = 0;
+                       color->color_g_y  = 0;
+                       color->color_b_cb = color_value;
+                       break;
+               default:
+                       break;
+               }
+       }
+}
 
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+void get_mclk_switch_visual_confirm_color(
+               struct pipe_ctx *pipe_ctx,
+               struct tg_color *color)
+{
+       uint32_t color_value = MAX_TG_COLOR_VALUE;
 
-               if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
-                   pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-                       /* SubVP enable - red */
-                       color->color_g_y = 0;
+       if (pipe_ctx) {
+               switch (pipe_ctx->p_state_type) {
+               case P_STATE_V_BLANK:
+                       color->color_r_cr = color_value;
+                       color->color_g_y = color_value;
                        color->color_b_cb = 0;
+                       break;
+               case P_STATE_FPO:
+                       color->color_r_cr = 0;
+                       color->color_g_y  = color_value;
+                       color->color_b_cb = color_value;
+                       break;
+               case P_STATE_V_ACTIVE:
                        color->color_r_cr = color_value;
-                       enable_subvp = true;
-
-                       if (pipe_ctx->stream == pipe->stream)
-                               return;
+                       color->color_g_y  = 0;
+                       color->color_b_cb = color_value;
+                       break;
+               case P_STATE_SUB_VP:
+                       color->color_r_cr = color_value;
+                       color->color_g_y  = 0;
+                       color->color_b_cb = 0;
+                       break;
+               case P_STATE_DRR_SUB_VP:
+                       color->color_r_cr = 0;
+                       color->color_g_y  = color_value;
+                       color->color_b_cb = 0;
+                       break;
+               case P_STATE_V_BLANK_SUB_VP:
+                       color->color_r_cr = 0;
+                       color->color_g_y  = 0;
+                       color->color_b_cb = color_value;
+                       break;
+               default:
                        break;
                }
        }
+}
 
-       if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
-               color->color_r_cr = 0;
-               if (pipe_ctx->stream->allow_freesync == 1) {
-                       /* SubVP enable and DRR on - green */
-                       color->color_b_cb = 0;
-                       color->color_g_y = color_value;
+void set_p_state_switch_method(
+               struct dc *dc,
+               struct dc_state *context,
+               struct pipe_ctx *pipe_ctx)
+{
+       struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+       bool enable_subvp;
+
+       if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
+               return;
+
+       if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+                       dm_dram_clock_change_unsupported) {
+               /* MCLK switching is supported */
+               if (!pipe_ctx->has_vactive_margin) {
+                       /* In Vblank - yellow */
+                       pipe_ctx->p_state_type = P_STATE_V_BLANK;
+
+                       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+                               /* FPO + Vblank - cyan */
+                               pipe_ctx->p_state_type = P_STATE_FPO;
+                       }
                } else {
-                       /* SubVP enable and No DRR - blue */
-                       color->color_g_y = 0;
-                       color->color_b_cb = color_value;
+                       /* In Vactive - pink */
+                       pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+               }
+
+               /* SubVP */
+               enable_subvp = false;
+
+               for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) &&
+                                       dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+                               /* SubVP enable - red */
+                               pipe_ctx->p_state_type = P_STATE_SUB_VP;
+                               enable_subvp = true;
+
+                               if (pipe_ctx->stream == pipe->stream)
+                                       return;
+                               break;
+                       }
+               }
+
+               if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) {
+                       if (pipe_ctx->stream->allow_freesync == 1) {
+                               /* SubVP enable and DRR on - green */
+                               pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+                       } else {
+                               /* SubVP enable and No DRR - blue */
+                               pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+                       }
                }
        }
 }
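
The two hunks above split what used to be one job: set_p_state_switch_method() classifies the pipe and records the result in pipe_ctx->p_state_type, while the get_*_visual_confirm_color() helpers shrink to pure lookups on that field (note their dc and context parameters are gone). The assumed caller order, matching the commit_planes_for_stream loops earlier in this diff:

	struct tg_color color = {0};

	/* classify first: writes pipe_ctx->p_state_type */
	set_p_state_switch_method(dc, context, pipe_ctx);

	/* then the debug color is a pure function of the stored type */
	get_mclk_switch_visual_confirm_color(pipe_ctx, &color);
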
@@ -473,7 +559,8 @@ void hwss_build_fast_sequence(struct dc *dc,
                unsigned int dmub_cmd_count,
                struct block_sequence block_sequence[],
                int *num_steps,
-               struct pipe_ctx *pipe_ctx)
+               struct pipe_ctx *pipe_ctx,
+               struct dc_stream_status *stream_status)
 {
        struct dc_plane_state *plane = pipe_ctx->plane_state;
        struct dc_stream_state *stream = pipe_ctx->stream;
@@ -490,7 +577,8 @@ void hwss_build_fast_sequence(struct dc *dc,
        if (dc->hwss.subvp_pipe_control_lock_fast) {
                block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
                block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
-               block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+               block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+                               plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
                block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
                (*num_steps)++;
        }
@@ -529,7 +617,7 @@ void hwss_build_fast_sequence(struct dc *dc,
                        }
                        if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
                                if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
-                                               current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+                                               stream_status->mall_stream_config.type == SUBVP_MAIN) {
                                        block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
                                        block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
                                        block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
@@ -612,7 +700,8 @@ void hwss_build_fast_sequence(struct dc *dc,
        if (dc->hwss.subvp_pipe_control_lock_fast) {
                block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
                block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false;
-               block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+               block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+                               plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
                block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
                (*num_steps)++;
        }
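
hwss_build_fast_sequence() now takes the stream's status explicitly, so the SubVP lock/unlock steps can derive subvp_immediate_flip from stream_status->mall_stream_config instead of chasing it through pipe_ctx->stream, which no longer carries the SubVP config after this series. The caller side, condensed from commit_planes_for_stream_fast() earlier in this diff:

	/* Resolve the status once from the owning state, then pass it
	 * down the fast path.
	 */
	struct dc_stream_status *stream_status =
			dc_state_get_stream_status(context, stream);

	hwss_build_fast_sequence(dc,
			context->dc_dmub_cmd,
			context->dmub_cmd_count,
			context->block_sequence,
			&(context->block_sequence_steps),
			top_pipe_to_program,
			stream_status);
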
@@ -724,7 +813,7 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
        union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd;
        enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type;
 
-       dm_execute_dmub_cmd(ctx, cmd, wait_type);
+       dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
 }
 
 void hwss_program_manual_trigger(union block_sequence_params *params)
@@ -812,42 +901,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
        dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
 }
 
-void get_mclk_switch_visual_confirm_color(
-               struct dc *dc,
-               struct dc_state *context,
-               struct pipe_ctx *pipe_ctx,
-               struct tg_color *color)
-{
-       uint32_t color_value = MAX_TG_COLOR_VALUE;
-       struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
-       if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
-               return;
-
-       if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
-                       dm_dram_clock_change_unsupported) {
-               /* MCLK switching is supported */
-               if (!pipe_ctx->has_vactive_margin) {
-                       /* In Vblank - yellow */
-                       color->color_r_cr = color_value;
-                       color->color_g_y = color_value;
-
-                       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
-                               /* FPO + Vblank - cyan */
-                               color->color_r_cr = 0;
-                               color->color_g_y  = color_value;
-                               color->color_b_cb = color_value;
-                       }
-               } else {
-                       /* In Vactive - pink */
-                       color->color_r_cr = color_value;
-                       color->color_b_cb = color_value;
-               }
-               /* SubVP */
-               get_subvp_visual_confirm_color(dc, context, pipe_ctx, color);
-       }
-}
-
 void get_surface_tile_visual_confirm_color(
                struct pipe_ctx *pipe_ctx,
                struct tg_color *color)
index ed94187c2afa2dd1aea2098c88b0be1d18876990..c6c35037bdb8b75d538b8d6f1af08556bd4e2236 100644 (file)
@@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link,
        return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
 }
 
+bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+               bool wait, bool force_static, const unsigned int *power_opts)
+{
+       return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait,
+                       force_static, power_opts);
+}
+
 bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state)
 {
        return link->dc->link_srv->edp_get_replay_state(link, state);
@@ -497,7 +504,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
        link->dc->link_srv->enable_hpd_filter(link, enable);
 }
 
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
 {
        return dc->link_srv->validate_dpia_bandwidth(streams, count);
 }
index 4382d9ae4292da4cff35942963af2bc2db9904ca..9fbdb09697fd5ea16abe86e4f970e80fb764ff7f 100644 (file)
@@ -42,6 +42,7 @@
 #include "link_enc_cfg.h"
 #include "link.h"
 #include "clk_mgr.h"
+#include "dc_state_priv.h"
 #include "virtual/virtual_link_hwss.h"
 #include "link/hwss/link_hwss_dio.h"
 #include "link/hwss/link_hwss_dpia.h"
@@ -2193,6 +2194,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
        for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
                otg_master = resource_get_otg_master_for_stream(
                                &state->res_ctx, state->streams[stream_idx]);
+               if (!otg_master || otg_master->stream_res.tg == NULL) {
+                       DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
+                       return;
+               }
                slice_count = resource_get_opp_heads_for_otg_master(otg_master,
                                &state->res_ctx, opp_heads);
                for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
@@ -2459,6 +2464,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
        struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
                        &context->res_ctx, stream);
 
+       if (!otg_master)
+               return;
+
        ASSERT(resource_get_odm_slice_count(otg_master) == 1);
        ASSERT(otg_master->plane_state == NULL);
        ASSERT(otg_master->stream_res.stream_enc);
@@ -2993,189 +3001,6 @@ bool resource_update_pipes_for_plane_with_slice_count(
        return result;
 }
 
-bool dc_add_plane_to_context(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_plane_state *plane_state,
-               struct dc_state *context)
-{
-       struct resource_pool *pool = dc->res_pool;
-       struct pipe_ctx *otg_master_pipe;
-       struct dc_stream_status *stream_status = NULL;
-       bool added = false;
-
-       stream_status = dc_stream_get_status_from_state(context, stream);
-       if (stream_status == NULL) {
-               dm_error("Existing stream not found; failed to attach surface!\n");
-               goto out;
-       } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
-               dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
-                               plane_state, MAX_SURFACE_NUM);
-               goto out;
-       }
-
-       otg_master_pipe = resource_get_otg_master_for_stream(
-                       &context->res_ctx, stream);
-       added = resource_append_dpp_pipes_for_plane_composition(context,
-                       dc->current_state, pool, otg_master_pipe, plane_state);
-
-       if (added) {
-               stream_status->plane_states[stream_status->plane_count] =
-                               plane_state;
-               stream_status->plane_count++;
-               dc_plane_state_retain(plane_state);
-       }
-
-out:
-       return added;
-}
-
-bool dc_remove_plane_from_context(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_plane_state *plane_state,
-               struct dc_state *context)
-{
-       int i;
-       struct dc_stream_status *stream_status = NULL;
-       struct resource_pool *pool = dc->res_pool;
-
-       if (!plane_state)
-               return true;
-
-       for (i = 0; i < context->stream_count; i++)
-               if (context->streams[i] == stream) {
-                       stream_status = &context->stream_status[i];
-                       break;
-               }
-
-       if (stream_status == NULL) {
-               dm_error("Existing stream not found; failed to remove plane.\n");
-               return false;
-       }
-
-       resource_remove_dpp_pipes_for_plane_composition(
-                       context, pool, plane_state);
-
-       for (i = 0; i < stream_status->plane_count; i++) {
-               if (stream_status->plane_states[i] == plane_state) {
-                       dc_plane_state_release(stream_status->plane_states[i]);
-                       break;
-               }
-       }
-
-       if (i == stream_status->plane_count) {
-               dm_error("Existing plane_state not found; failed to detach it!\n");
-               return false;
-       }
-
-       stream_status->plane_count--;
-
-       /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
-       for (; i < stream_status->plane_count; i++)
-               stream_status->plane_states[i] = stream_status->plane_states[i + 1];
-
-       stream_status->plane_states[stream_status->plane_count] = NULL;
-
-       if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
-               /* ODM combine could prevent us from supporting more planes
-                * we will reset ODM slice count back to 1 when all planes have
-                * been removed to maximize the amount of planes supported when
-                * new planes are added.
-                */
-               resource_update_pipes_for_stream_with_slice_count(
-                               context, dc->current_state, dc->res_pool, stream, 1);
-
-       return true;
-}
-
-/**
- * dc_rem_all_planes_for_stream - Remove planes attached to the target stream.
- *
- * @dc: Current dc state.
- * @stream: Target stream, which we want to remove the attached plans.
- * @context: New context.
- *
- * Return:
- * Return true if DC was able to remove all planes from the target
- * stream, otherwise, return false.
- */
-bool dc_rem_all_planes_for_stream(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_state *context)
-{
-       int i, old_plane_count;
-       struct dc_stream_status *stream_status = NULL;
-       struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
-
-       for (i = 0; i < context->stream_count; i++)
-                       if (context->streams[i] == stream) {
-                               stream_status = &context->stream_status[i];
-                               break;
-                       }
-
-       if (stream_status == NULL) {
-               dm_error("Existing stream %p not found!\n", stream);
-               return false;
-       }
-
-       old_plane_count = stream_status->plane_count;
-
-       for (i = 0; i < old_plane_count; i++)
-               del_planes[i] = stream_status->plane_states[i];
-
-       for (i = 0; i < old_plane_count; i++)
-               if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
-                       return false;
-
-       return true;
-}
-
-static bool add_all_planes_for_stream(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               const struct dc_validation_set set[],
-               int set_count,
-               struct dc_state *context)
-{
-       int i, j;
-
-       for (i = 0; i < set_count; i++)
-               if (set[i].stream == stream)
-                       break;
-
-       if (i == set_count) {
-               dm_error("Stream %p not found in set!\n", stream);
-               return false;
-       }
-
-       for (j = 0; j < set[i].plane_count; j++)
-               if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
-                       return false;
-
-       return true;
-}
-
-bool dc_add_all_planes_for_stream(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_plane_state * const *plane_states,
-               int plane_count,
-               struct dc_state *context)
-{
-       struct dc_validation_set set;
-       int i;
-
-       set.stream = stream;
-       set.plane_count = plane_count;
-
-       for (i = 0; i < plane_count; i++)
-               set.plane_states[i] = plane_states[i];
-
-       return add_all_planes_for_stream(dc, stream, &set, 1, context);
-}
-
 bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
                       struct dc_stream_state *new_stream)
 {
@@ -3327,84 +3152,6 @@ static struct audio *find_first_free_audio(
        return NULL;
 }
 
-/*
- * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
- */
-enum dc_status dc_add_stream_to_ctx(
-               struct dc *dc,
-               struct dc_state *new_ctx,
-               struct dc_stream_state *stream)
-{
-       enum dc_status res;
-       DC_LOGGER_INIT(dc->ctx->logger);
-
-       if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
-               DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
-               return DC_ERROR_UNEXPECTED;
-       }
-
-       new_ctx->streams[new_ctx->stream_count] = stream;
-       dc_stream_retain(stream);
-       new_ctx->stream_count++;
-
-       res = resource_add_otg_master_for_stream_output(
-                       new_ctx, dc->res_pool, stream);
-       if (res != DC_OK)
-               DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
-
-       return res;
-}
-
-/*
- * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
- */
-enum dc_status dc_remove_stream_from_ctx(
-                       struct dc *dc,
-                       struct dc_state *new_ctx,
-                       struct dc_stream_state *stream)
-{
-       int i;
-       struct dc_context *dc_ctx = dc->ctx;
-       struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
-                       &new_ctx->res_ctx, stream);
-
-       if (!del_pipe) {
-               DC_ERROR("Pipe not found for stream %p !\n", stream);
-               return DC_ERROR_UNEXPECTED;
-       }
-
-       resource_update_pipes_for_stream_with_slice_count(new_ctx,
-                       dc->current_state, dc->res_pool, stream, 1);
-       resource_remove_otg_master_for_stream_output(
-                       new_ctx, dc->res_pool, stream);
-
-       for (i = 0; i < new_ctx->stream_count; i++)
-               if (new_ctx->streams[i] == stream)
-                       break;
-
-       if (new_ctx->streams[i] != stream) {
-               DC_ERROR("Context doesn't have stream %p !\n", stream);
-               return DC_ERROR_UNEXPECTED;
-       }
-
-       dc_stream_release(new_ctx->streams[i]);
-       new_ctx->stream_count--;
-
-       /* Trim back arrays */
-       for (; i < new_ctx->stream_count; i++) {
-               new_ctx->streams[i] = new_ctx->streams[i + 1];
-               new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
-       }
-
-       new_ctx->streams[new_ctx->stream_count] = NULL;
-       memset(
-                       &new_ctx->stream_status[new_ctx->stream_count],
-                       0,
-                       sizeof(new_ctx->stream_status[0]));
-
-       return DC_OK;
-}
-
 static struct dc_stream_state *find_pll_sharable_stream(
                struct dc_stream_state *stream_needs_pll,
                struct dc_state *context)
@@ -3784,34 +3531,6 @@ enum dc_status resource_map_pool_resources(
        return DC_ERROR_UNEXPECTED;
 }
 
-/**
- * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state
- *
- * @dc: copy out of dc->current_state
- * @dst_ctx: copy into this
- *
- * This function makes a shallow copy of the current DC state and increments
- * refcounts on existing streams and planes.
- */
-void dc_resource_state_copy_construct_current(
-               const struct dc *dc,
-               struct dc_state *dst_ctx)
-{
-       dc_resource_state_copy_construct(dc->current_state, dst_ctx);
-}
-
-
-void dc_resource_state_construct(
-               const struct dc *dc,
-               struct dc_state *dst_ctx)
-{
-       dst_ctx->clk_mgr = dc->clk_mgr;
-
-       /* Initialise DIG link encoder resource tracking variables. */
-       link_enc_cfg_init(dc, dst_ctx);
-}
-
-
 bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
 {
        if (dc->res_pool == NULL)
@@ -3855,6 +3574,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
        return false;
 }
 
+static bool add_all_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               const struct dc_validation_set set[],
+               int set_count,
+               struct dc_state *state)
+{
+       int i, j;
+
+       for (i = 0; i < set_count; i++)
+               if (set[i].stream == stream)
+                       break;
+
+       if (i == set_count) {
+               dm_error("Stream %p not found in set!\n", stream);
+               return false;
+       }
+
+       for (j = 0; j < set[i].plane_count; j++)
+               if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state))
+                       return false;
+
+       return true;
+}
+
 /**
  * dc_validate_with_context - Validate and update the potential new stream in the context object
  *
@@ -3960,7 +3704,8 @@ enum dc_status dc_validate_with_context(struct dc *dc,
                                                       unchanged_streams[i],
                                                       set,
                                                       set_count)) {
-                       if (!dc_rem_all_planes_for_stream(dc,
+
+                       if (!dc_state_rem_all_planes_for_stream(dc,
                                                          unchanged_streams[i],
                                                          context)) {
                                res = DC_FAIL_DETACH_SURFACES;
@@ -3982,12 +3727,24 @@ enum dc_status dc_validate_with_context(struct dc *dc,
                        }
                }
 
-               if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
-                       res = DC_FAIL_DETACH_SURFACES;
-                       goto fail;
+               if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) {
+                       /* remove phantoms specifically */
+                       if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) {
+                               res = DC_FAIL_DETACH_SURFACES;
+                               goto fail;
+                       }
+
+                       res = dc_state_remove_phantom_stream(dc, context, del_streams[i]);
+                       dc_state_release_phantom_stream(dc, context, del_streams[i]);
+               } else {
+                       if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+                               res = DC_FAIL_DETACH_SURFACES;
+                               goto fail;
+                       }
+
+                       res = dc_state_remove_stream(dc, context, del_streams[i]);
                }
 
-               res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
                if (res != DC_OK)
                        goto fail;
        }
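
Stream removal during validation is now phantom-aware: SubVP phantom streams are torn down with the dedicated phantom helpers (remove from the context, then release the context's references), while ordinary streams take the old path under its new dc_state_remove_stream() name. The branch, condensed (del_stream stands for del_streams[i] in the loop above):

	if (dc_state_get_stream_subvp_type(context, del_stream) == SUBVP_PHANTOM) {
		/* phantom: detach planes, then remove and release the stream */
		if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_stream,
								context, true))
			return DC_FAIL_DETACH_SURFACES;

		res = dc_state_remove_phantom_stream(dc, context, del_stream);
		dc_state_release_phantom_stream(dc, context, del_stream);
	} else {
		if (!dc_state_rem_all_planes_for_stream(dc, del_stream, context))
			return DC_FAIL_DETACH_SURFACES;

		res = dc_state_remove_stream(dc, context, del_stream);
	}
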
@@ -4010,7 +3767,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
        /* Add new streams and then add all planes for the new stream */
        for (i = 0; i < add_streams_count; i++) {
                calculate_phy_pix_clks(add_streams[i]);
-               res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
+               res = dc_state_add_stream(dc, context, add_streams[i]);
                if (res != DC_OK)
                        goto fail;
 
@@ -4516,84 +4273,6 @@ static void set_vtem_info_packet(
        *info_packet = stream->vtem_infopacket;
 }
 
-void dc_resource_state_destruct(struct dc_state *context)
-{
-       int i, j;
-
-       for (i = 0; i < context->stream_count; i++) {
-               for (j = 0; j < context->stream_status[i].plane_count; j++)
-                       dc_plane_state_release(
-                               context->stream_status[i].plane_states[j]);
-
-               context->stream_status[i].plane_count = 0;
-               dc_stream_release(context->streams[i]);
-               context->streams[i] = NULL;
-       }
-       context->stream_count = 0;
-       context->stream_mask = 0;
-       memset(&context->res_ctx, 0, sizeof(context->res_ctx));
-       memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg));
-       memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars));
-       context->clk_mgr = NULL;
-       memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw));
-       memset(context->block_sequence, 0, sizeof(context->block_sequence));
-       context->block_sequence_steps = 0;
-       memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd));
-       context->dmub_cmd_count = 0;
-       memset(&context->perf_params, 0, sizeof(context->perf_params));
-       memset(&context->scratch, 0, sizeof(context->scratch));
-}
-
-void dc_resource_state_copy_construct(
-               const struct dc_state *src_ctx,
-               struct dc_state *dst_ctx)
-{
-       int i, j;
-       struct kref refcount = dst_ctx->refcount;
-#ifdef CONFIG_DRM_AMD_DC_FP
-       struct dml2_context *dml2 = NULL;
-
-       // Need to preserve allocated dml2 context
-       if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
-               dml2 = dst_ctx->bw_ctx.dml2;
-#endif
-
-       *dst_ctx = *src_ctx;
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-       // Preserve allocated dml2 context
-       if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
-               dst_ctx->bw_ctx.dml2 = dml2;
-#endif
-
-       for (i = 0; i < MAX_PIPES; i++) {
-               struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
-
-               if (cur_pipe->top_pipe)
-                       cur_pipe->top_pipe =  &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
-               if (cur_pipe->bottom_pipe)
-                       cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
-               if (cur_pipe->next_odm_pipe)
-                       cur_pipe->next_odm_pipe =  &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
-               if (cur_pipe->prev_odm_pipe)
-                       cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-       }
-
-       for (i = 0; i < dst_ctx->stream_count; i++) {
-               dc_stream_retain(dst_ctx->streams[i]);
-               for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
-                       dc_plane_state_retain(
-                               dst_ctx->stream_status[i].plane_states[j]);
-       }
-
-       /* context refcount should not be overridden */
-       dst_ctx->refcount = refcount;
-
-}
-
 struct clock_source *dc_resource_find_first_free_pll(
                struct resource_context *res_ctx,
                const struct resource_pool *pool)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
new file mode 100644 (file)
index 0000000..88c6436
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -0,0 +1,867 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "core_types.h"
+#include "core_status.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
+#include "dc_plane_priv.h"
+
+#include "dm_services.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+
+#include "dml2/dml2_wrapper.h"
+#include "dml2/dml2_internal_types.h"
+
+#define DC_LOGGER \
+       dc->ctx->logger
+#define DC_LOGGER_INIT(logger)
+
+/* Private dc_state helper functions */
+static bool dc_state_track_phantom_stream(struct dc_state *state,
+               struct dc_stream_state *phantom_stream)
+{
+       if (state->phantom_stream_count >= MAX_PHANTOM_PIPES)
+               return false;
+
+       state->phantom_streams[state->phantom_stream_count++] = phantom_stream;
+
+       return true;
+}
+
+static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+       bool res = false;
+       int i;
+
+       /* first find phantom stream in the dc_state */
+       for (i = 0; i < state->phantom_stream_count; i++) {
+               if (state->phantom_streams[i] == phantom_stream) {
+                       state->phantom_streams[i] = NULL;
+                       res = true;
+                       break;
+               }
+       }
+
+       /* failed to find stream in state */
+       if (!res)
+               return res;
+
+       /* trim back phantom streams */
+       state->phantom_stream_count--;
+       for (; i < state->phantom_stream_count; i++)
+               state->phantom_streams[i] = state->phantom_streams[i + 1];
+
+       return res;
+}
+
+static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+       int i;
+
+       for (i = 0; i < state->phantom_stream_count; i++) {
+               if (state->phantom_streams[i] == phantom_stream)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool dc_state_track_phantom_plane(struct dc_state *state,
+               struct dc_plane_state *phantom_plane)
+{
+       if (state->phantom_plane_count >= MAX_PHANTOM_PIPES)
+               return false;
+
+       state->phantom_planes[state->phantom_plane_count++] = phantom_plane;
+
+       return true;
+}
+
+static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+       bool res = false;
+       int i;
+
+       /* first find phantom plane in the dc_state */
+       for (i = 0; i < state->phantom_plane_count; i++) {
+               if (state->phantom_planes[i] == phantom_plane) {
+                       state->phantom_planes[i] = NULL;
+                       res = true;
+                       break;
+               }
+       }
+
+       /* failed to find plane in state */
+       if (!res)
+               return res;
+
+       /* trim back phantom planes */
+       state->phantom_plane_count--;
+       for (; i < state->phantom_plane_count; i++)
+               state->phantom_planes[i] = state->phantom_planes[i + 1];
+
+       return res;
+}
+
+static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+       int i;
+
+       for (i = 0; i < state->phantom_plane_count; i++) {
+               if (state->phantom_planes[i] == phantom_plane)
+                       return true;
+       }
+
+       return false;
+}
+
+static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state)
+{
+       int i, j;
+
+       memcpy(dst_state, src_state, sizeof(struct dc_state));
+
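+       /* the struct copy above leaves the pipe linkage pointers (top/bottom
+        * and ODM neighbours) aimed at src_state; re-point them into dst_state
+        * via their pipe_idx
+        */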
+       for (i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i];
+
+               if (cur_pipe->top_pipe)
+                       cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+               if (cur_pipe->bottom_pipe)
+                       cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+               if (cur_pipe->prev_odm_pipe)
+                       cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
+
+               if (cur_pipe->next_odm_pipe)
+                       cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
+       }
+
+       /* retain phantoms */
+       for (i = 0; i < dst_state->phantom_stream_count; i++)
+               dc_stream_retain(dst_state->phantom_streams[i]);
+
+       for (i = 0; i < dst_state->phantom_plane_count; i++)
+               dc_plane_state_retain(dst_state->phantom_planes[i]);
+
+       /* retain streams and planes */
+       for (i = 0; i < dst_state->stream_count; i++) {
+               dc_stream_retain(dst_state->streams[i]);
+               for (j = 0; j < dst_state->stream_status[i].plane_count; j++)
+                       dc_plane_state_retain(
+                                       dst_state->stream_status[i].plane_states[j]);
+       }
+}
+
+static void init_state(struct dc *dc, struct dc_state *state)
+{
+       /* Each context must have its own instance of VBA, and in order to
+        * initialize and obtain IP and SOC, the base DML instance from DC is
+        * initially copied into every context.
+        */
+       memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+}
+
+/* Public dc_state functions */
+struct dc_state *dc_state_create(struct dc *dc)
+{
+       struct dc_state *state = kvzalloc(sizeof(struct dc_state),
+                       GFP_KERNEL);
+
+       if (!state)
+               return NULL;
+
+       init_state(dc, state);
+       dc_state_construct(dc, state);
+
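+       /* when DML2 is in use, each state carries its own dml2 context */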
+#ifdef CONFIG_DRM_AMD_DC_FP
+       if (dc->debug.using_dml2)
+               dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+#endif
+
+       kref_init(&state->refcount);
+
+       return state;
+}
+
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
+{
+       struct kref refcount = dst_state->refcount;
+#ifdef CONFIG_DRM_AMD_DC_FP
+       struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
+#endif
+
+       dc_state_copy_internal(dst_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
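+       /* restore dst's own dml2 allocation (clobbered by the struct copy
+        * above) and sync its contents from src
+        */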
+       dst_state->bw_ctx.dml2 = dst_dml2;
+       if (src_state->bw_ctx.dml2)
+               dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+#endif
+
+       /* context refcount should not be overridden */
+       dst_state->refcount = refcount;
+}
+
+struct dc_state *dc_state_create_copy(struct dc_state *src_state)
+{
+       struct dc_state *new_state;
+
+       new_state = kvmalloc(sizeof(struct dc_state),
+                       GFP_KERNEL);
+       if (!new_state)
+               return NULL;
+
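+       /* no need to zero the allocation: dc_state_copy_internal() overwrites
+        * the entire struct
+        */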
+       dc_state_copy_internal(new_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+       if (src_state->bw_ctx.dml2 &&
+                       !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
+               dc_state_release(new_state);
+               return NULL;
+       }
+#endif
+
+       kref_init(&new_state->refcount);
+
+       return new_state;
+}
+
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state)
+{
+       dc_state_copy(dst_state, dc->current_state);
+}
+
+struct dc_state *dc_state_create_current_copy(struct dc *dc)
+{
+       return dc_state_create_copy(dc->current_state);
+}
+
+void dc_state_construct(struct dc *dc, struct dc_state *state)
+{
+       state->clk_mgr = dc->clk_mgr;
+
+       /* Initialise DIG link encoder resource tracking variables. */
+       if (dc->res_pool)
+               link_enc_cfg_init(dc, state);
+}
+
+void dc_state_destruct(struct dc_state *state)
+{
+       int i, j;
+
+       for (i = 0; i < state->stream_count; i++) {
+               for (j = 0; j < state->stream_status[i].plane_count; j++)
+                       dc_plane_state_release(
+                                       state->stream_status[i].plane_states[j]);
+
+               state->stream_status[i].plane_count = 0;
+               dc_stream_release(state->streams[i]);
+               state->streams[i] = NULL;
+       }
+       state->stream_count = 0;
+
+       /* release tracked phantoms */
+       for (i = 0; i < state->phantom_stream_count; i++) {
+               dc_stream_release(state->phantom_streams[i]);
+               state->phantom_streams[i] = NULL;
+       }
+
+       for (i = 0; i < state->phantom_plane_count; i++) {
+               dc_plane_state_release(state->phantom_planes[i]);
+               state->phantom_planes[i] = NULL;
+       }
+       state->stream_mask = 0;
+       memset(&state->res_ctx, 0, sizeof(state->res_ctx));
+       memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
+       memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars));
+       state->clk_mgr = NULL;
+       memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw));
+       memset(state->block_sequence, 0, sizeof(state->block_sequence));
+       state->block_sequence_steps = 0;
+       memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
+       state->dmub_cmd_count = 0;
+       memset(&state->perf_params, 0, sizeof(state->perf_params));
+       memset(&state->scratch, 0, sizeof(state->scratch));
+}
+
+void dc_state_retain(struct dc_state *state)
+{
+       kref_get(&state->refcount);
+}
+
+static void dc_state_free(struct kref *kref)
+{
+       struct dc_state *state = container_of(kref, struct dc_state, refcount);
+
+       dc_state_destruct(state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+       dml2_destroy(state->bw_ctx.dml2);
+       state->bw_ctx.dml2 = 0;
+#endif
+
+       kvfree(state);
+}
+
+void dc_state_release(struct dc_state *state)
+{
+       kref_put(&state->refcount, dc_state_free);
+}
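+
+/*
+ * Illustrative usage only (not introduced by this patch): dc_state objects
+ * are refcounted, so callers typically build a transient copy, mutate it,
+ * and drop their reference when done:
+ *
+ *     struct dc_state *state = dc_state_create_current_copy(dc);
+ *
+ *     if (state) {
+ *             ... dc_state_add_stream() / dc_state_add_plane() ...
+ *             dc_state_release(state);
+ *     }
+ */
+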
+/*
+ * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
+ */
+enum dc_status dc_state_add_stream(
+               struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *stream)
+{
+       enum dc_status res;
+
+       DC_LOGGER_INIT(dc->ctx->logger);
+
+       if (state->stream_count >= dc->res_pool->timing_generator_count) {
+               DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
+               return DC_ERROR_UNEXPECTED;
+       }
+
+       state->streams[state->stream_count] = stream;
+       dc_stream_retain(stream);
+       state->stream_count++;
+
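+       /* acquire an OTG master pipe to drive the new stream's output */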
+       res = resource_add_otg_master_for_stream_output(
+                       state, dc->res_pool, stream);
+       if (res != DC_OK)
+               DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
+
+       return res;
+}
+
+/*
+ * dc_state_remove_stream() - Remove a stream from a dc_state.
+ */
+enum dc_status dc_state_remove_stream(
+               struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *stream)
+{
+       int i;
+       struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
+                       &state->res_ctx, stream);
+
+       if (!del_pipe) {
+               dm_error("Pipe not found for stream %p !\n", stream);
+               return DC_ERROR_UNEXPECTED;
+       }
+
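+       /* collapse the stream's ODM slices back to 1 before releasing its
+        * OTG master
+        */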
+       resource_update_pipes_for_stream_with_slice_count(state,
+                       dc->current_state, dc->res_pool, stream, 1);
+       resource_remove_otg_master_for_stream_output(
+                       state, dc->res_pool, stream);
+
+       for (i = 0; i < state->stream_count; i++)
+               if (state->streams[i] == stream)
+                       break;
+
+       if (state->streams[i] != stream) {
+               dm_error("Context doesn't have stream %p !\n", stream);
+               return DC_ERROR_UNEXPECTED;
+       }
+
+       dc_stream_release(state->streams[i]);
+       state->stream_count--;
+
+       /* Trim back arrays */
+       for (; i < state->stream_count; i++) {
+               state->streams[i] = state->streams[i + 1];
+               state->stream_status[i] = state->stream_status[i + 1];
+       }
+
+       state->streams[state->stream_count] = NULL;
+       memset(
+                       &state->stream_status[state->stream_count],
+                       0,
+                       sizeof(state->stream_status[0]));
+
+       return DC_OK;
+}
+
+bool dc_state_add_plane(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_plane_state *plane_state,
+               struct dc_state *state)
+{
+       struct resource_pool *pool = dc->res_pool;
+       struct pipe_ctx *otg_master_pipe;
+       struct dc_stream_status *stream_status = NULL;
+       bool added = false;
+
+       stream_status = dc_state_get_stream_status(state, stream);
+       if (stream_status == NULL) {
+               dm_error("Existing stream not found; failed to attach surface!\n");
+               goto out;
+       } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+               dm_error("Surface: cannot attach plane_state %p! Maximum is: %d\n",
+                               plane_state, MAX_SURFACE_NUM);
+               goto out;
+       }
+
+       otg_master_pipe = resource_get_otg_master_for_stream(
+                       &state->res_ctx, stream);
+       if (otg_master_pipe)
+               added = resource_append_dpp_pipes_for_plane_composition(state,
+                               dc->current_state, pool, otg_master_pipe, plane_state);
+
+       if (added) {
+               stream_status->plane_states[stream_status->plane_count] =
+                               plane_state;
+               stream_status->plane_count++;
+               dc_plane_state_retain(plane_state);
+       }
+
+out:
+       return added;
+}
+
+bool dc_state_remove_plane(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_plane_state *plane_state,
+               struct dc_state *state)
+{
+       int i;
+       struct dc_stream_status *stream_status = NULL;
+       struct resource_pool *pool = dc->res_pool;
+
+       if (!plane_state)
+               return true;
+
+       for (i = 0; i < state->stream_count; i++)
+               if (state->streams[i] == stream) {
+                       stream_status = &state->stream_status[i];
+                       break;
+               }
+
+       if (stream_status == NULL) {
+               dm_error("Existing stream not found; failed to remove plane.\n");
+               return false;
+       }
+
+       resource_remove_dpp_pipes_for_plane_composition(
+                       state, pool, plane_state);
+
+       for (i = 0; i < stream_status->plane_count; i++) {
+               if (stream_status->plane_states[i] == plane_state) {
+                       dc_plane_state_release(stream_status->plane_states[i]);
+                       break;
+               }
+       }
+
+       if (i == stream_status->plane_count) {
+               dm_error("Existing plane_state not found; failed to detach it!\n");
+               return false;
+       }
+
+       stream_status->plane_count--;
+
+       /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+       for (; i < stream_status->plane_count; i++)
+               stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+       stream_status->plane_states[stream_status->plane_count] = NULL;
+
+       if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+               /* ODM combine could prevent us from supporting more planes, so
+                * we reset the ODM slice count back to 1 when all planes have
+                * been removed to maximize the number of planes supported when
+                * new planes are added.
+                */
+               resource_update_pipes_for_stream_with_slice_count(
+                               state, dc->current_state, dc->res_pool, stream, 1);
+
+       return true;
+}
+
+/**
+ * dc_state_rem_all_planes_for_stream - Remove all planes attached to the target stream.
+ *
+ * @dc: DC object the stream belongs to.
+ * @stream: Target stream whose attached planes we want to remove.
+ * @state: Context from which the planes are to be removed.
+ *
+ * Return:
+ * True if DC was able to remove all planes from the target stream,
+ * otherwise false.
+ */
+bool dc_state_rem_all_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_state *state)
+{
+       int i, old_plane_count;
+       struct dc_stream_status *stream_status = NULL;
+       struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+       for (i = 0; i < state->stream_count; i++)
+               if (state->streams[i] == stream) {
+                       stream_status = &state->stream_status[i];
+                       break;
+               }
+
+       if (stream_status == NULL) {
+               dm_error("Existing stream %p not found!\n", stream);
+               return false;
+       }
+
+       old_plane_count = stream_status->plane_count;
+
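+       /* snapshot the plane list first: dc_state_remove_plane() compacts
+        * stream_status->plane_states as each plane is removed
+        */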
+       for (i = 0; i < old_plane_count; i++)
+               del_planes[i] = stream_status->plane_states[i];
+
+       for (i = 0; i < old_plane_count; i++)
+               if (!dc_state_remove_plane(dc, stream, del_planes[i], state))
+                       return false;
+
+       return true;
+}
+
+bool dc_state_add_all_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_plane_state * const *plane_states,
+               int plane_count,
+               struct dc_state *state)
+{
+       int i;
+       bool result = true;
+
+       for (i = 0; i < plane_count; i++)
+               if (!dc_state_add_plane(dc, stream, plane_states[i], state)) {
+                       result = false;
+                       break;
+               }
+
+       return result;
+}
+
+/* Private dc_state functions */
+
+/**
+ * dc_state_get_stream_status - Get stream status from given dc state
+ * @state: DC state to find the stream status in
+ * @stream: The stream to get the stream status for
+ *
+ * The given stream is expected to exist in the given dc state. Otherwise, NULL
+ * will be returned.
+ */
+struct dc_stream_status *dc_state_get_stream_status(
+               struct dc_state *state,
+               struct dc_stream_state *stream)
+{
+       uint8_t i;
+
+       if (state == NULL)
+               return NULL;
+
+       for (i = 0; i < state->stream_count; i++) {
+               if (stream == state->streams[i])
+                       return &state->stream_status[i];
+       }
+
+       return NULL;
+}
+
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+               const struct pipe_ctx *pipe_ctx)
+{
+       return dc_state_get_stream_subvp_type(state, pipe_ctx->stream);
+}
+
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+               const struct dc_stream_state *stream)
+{
+       int i;
+
+       enum mall_stream_type type = SUBVP_NONE;
+
+       for (i = 0; i < state->stream_count; i++) {
+               if (state->streams[i] == stream) {
+                       type = state->stream_status[i].mall_stream_config.type;
+                       break;
+               }
+       }
+
+       return type;
+}
+
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+               const struct dc_stream_state *stream)
+{
+       int i;
+
+       struct dc_stream_state *paired_stream = NULL;
+
+       for (i = 0; i < state->stream_count; i++) {
+               if (state->streams[i] == stream) {
+                       paired_stream = state->stream_status[i].mall_stream_config.paired_stream;
+                       break;
+               }
+       }
+
+       return paired_stream;
+}
+
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *main_stream)
+{
+       struct dc_stream_state *phantom_stream;
+
+       DC_LOGGER_INIT(dc->ctx->logger);
+
+       phantom_stream = dc_create_stream_for_sink(main_stream->sink);
+
+       if (!phantom_stream) {
+               DC_LOG_ERROR("Failed to allocate phantom stream.\n");
+               return NULL;
+       }
+
+       /* track phantom stream in dc_state */
+       dc_state_track_phantom_stream(state, phantom_stream);
+
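+       /* phantom streams mirror the main stream's sink but carry a virtual
+        * signal and stay dpms-off
+        */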
+       phantom_stream->is_phantom = true;
+       phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
+       phantom_stream->dpms_off = true;
+
+       return phantom_stream;
+}
+
+void dc_state_release_phantom_stream(const struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *phantom_stream)
+{
+       DC_LOGGER_INIT(dc->ctx->logger);
+
+       if (!dc_state_untrack_phantom_stream(state, phantom_stream)) {
+               DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state);
+               return;
+       }
+
+       dc_stream_release(phantom_stream);
+}
+
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+               struct dc_state *state,
+               struct dc_plane_state *main_plane)
+{
+       struct dc_plane_state *phantom_plane = dc_create_plane_state(dc);
+
+       DC_LOGGER_INIT(dc->ctx->logger);
+
+       if (!phantom_plane) {
+               DC_LOG_ERROR("Failed to allocate phantom plane.\n");
+               return NULL;
+       }
+
+       /* track phantom plane in dc_state */
+       dc_state_track_phantom_plane(state, phantom_plane);
+
+       phantom_plane->is_phantom = true;
+
+       return phantom_plane;
+}
+
+void dc_state_release_phantom_plane(const struct dc *dc,
+               struct dc_state *state,
+               struct dc_plane_state *phantom_plane)
+{
+       DC_LOGGER_INIT(dc->ctx->logger);
+
+       if (!dc_state_untrack_phantom_plane(state, phantom_plane)) {
+               DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state);
+               return;
+       }
+
+       dc_plane_state_release(phantom_plane);
+}
+
+/* add phantom streams to context and generate correct meta inside dc_state */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *phantom_stream,
+               struct dc_stream_state *main_stream)
+{
+       struct dc_stream_status *main_stream_status;
+       struct dc_stream_status *phantom_stream_status;
+       enum dc_status res = dc_state_add_stream(dc, state, phantom_stream);
+
+       /* check if stream is tracked */
+       if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) {
+               /* stream must be tracked if added to state */
+               dc_state_track_phantom_stream(state, phantom_stream);
+       }
+
+       /* setup subvp meta */
+       main_stream_status = dc_state_get_stream_status(state, main_stream);
+       phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+       phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
+       phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+       main_stream_status->mall_stream_config.type = SUBVP_MAIN;
+       main_stream_status->mall_stream_config.paired_stream = phantom_stream;
+
+       return res;
+}
+
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *phantom_stream)
+{
+       struct dc_stream_status *main_stream_status;
+       struct dc_stream_status *phantom_stream_status;
+
+       /* reset subvp meta */
+       phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+       main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream);
+       phantom_stream_status->mall_stream_config.type = SUBVP_NONE;
+       phantom_stream_status->mall_stream_config.paired_stream = NULL;
+       if (main_stream_status) {
+               main_stream_status->mall_stream_config.type = SUBVP_NONE;
+               main_stream_status->mall_stream_config.paired_stream = NULL;
+       }
+
+       /* remove stream from state */
+       return dc_state_remove_stream(dc, state, phantom_stream);
+}
+
+bool dc_state_add_phantom_plane(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_plane_state *phantom_plane,
+               struct dc_state *state)
+{
+       bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state);
+
+       /* check if plane is tracked */
+       if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) {
+               /* plane must be tracked if added to state */
+               dc_state_track_phantom_plane(state, phantom_plane);
+       }
+
+       return res;
+}
+
+bool dc_state_remove_phantom_plane(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_plane_state *phantom_plane,
+               struct dc_state *state)
+{
+       return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state);
+}
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_state *state,
+               bool should_release_planes)
+{
+       int i, old_plane_count;
+       struct dc_stream_status *stream_status = NULL;
+       struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+       for (i = 0; i < state->stream_count; i++)
+               if (state->streams[i] == phantom_stream) {
+                       stream_status = &state->stream_status[i];
+                       break;
+               }
+
+       if (stream_status == NULL) {
+               dm_error("Existing stream %p not found!\n", phantom_stream);
+               return false;
+       }
+
+       old_plane_count = stream_status->plane_count;
+
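+       /* snapshot the plane list first, since removal compacts plane_states */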
+       for (i = 0; i < old_plane_count; i++)
+               del_planes[i] = stream_status->plane_states[i];
+
+       for (i = 0; i < old_plane_count; i++) {
+               if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state))
+                       return false;
+               if (should_release_planes)
+                       dc_state_release_phantom_plane(dc, state, del_planes[i]);
+       }
+
+       return true;
+}
+
+bool dc_state_add_all_phantom_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_plane_state * const *phantom_planes,
+               int plane_count,
+               struct dc_state *state)
+{
+       return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state);
+}
+
+bool dc_state_remove_phantom_streams_and_planes(
+       struct dc *dc,
+       struct dc_state *state)
+{
+       int i;
+       bool removed_phantom = false;
+       struct dc_stream_state *phantom_stream = NULL;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+               if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
+                       phantom_stream = pipe->stream;
+
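+                       /* detach only; releasing the tracked phantom references
+                        * is left to dc_state_release_phantom_streams_and_planes()
+                        */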
+                       dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false);
+                       dc_state_remove_phantom_stream(dc, state, phantom_stream);
+                       removed_phantom = true;
+               }
+       }
+       return removed_phantom;
+}
+
+void dc_state_release_phantom_streams_and_planes(
+               struct dc *dc,
+               struct dc_state *state)
+{
+       int i;
+
+       for (i = 0; i < state->phantom_stream_count; i++)
+               dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+
+       for (i = 0; i < state->phantom_plane_count; i++)
+               dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+}
index 38cd29b210c05477f9873526eb1352dc0915695c..54670e0b15189552dd8ee85a5c1a6f905820be37 100644 (file)
@@ -31,6 +31,8 @@
 #include "ipp.h"
 #include "timing_generator.h"
 #include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
 
 #define DC_LOGGER dc->ctx->logger
 
@@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
        }
 }
 
-static bool dc_stream_construct(struct dc_stream_state *stream,
+bool dc_stream_construct(struct dc_stream_state *stream,
        struct dc_sink *dc_sink_data)
 {
        uint32_t i = 0;
@@ -121,13 +123,12 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
        }
        stream->out_transfer_func->type = TF_TYPE_BYPASS;
 
-       stream->stream_id = stream->ctx->dc_stream_id_count;
-       stream->ctx->dc_stream_id_count++;
+       dc_stream_assign_stream_id(stream);
 
        return true;
 }
 
-static void dc_stream_destruct(struct dc_stream_state *stream)
+void dc_stream_destruct(struct dc_stream_state *stream)
 {
        dc_sink_release(stream->sink);
        if (stream->out_transfer_func != NULL) {
@@ -136,6 +137,13 @@ static void dc_stream_destruct(struct dc_stream_state *stream)
        }
 }
 
+void dc_stream_assign_stream_id(struct dc_stream_state *stream)
+{
+       /* MSB is reserved to indicate phantoms */
+       stream->stream_id = stream->ctx->dc_stream_id_count;
+       stream->ctx->dc_stream_id_count++;
+}
+
 void dc_stream_retain(struct dc_stream_state *stream)
 {
        kref_get(&stream->refcount);
@@ -196,8 +204,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
        if (new_stream->out_transfer_func)
                dc_transfer_func_retain(new_stream->out_transfer_func);
 
-       new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
-       new_stream->ctx->dc_stream_id_count++;
+       dc_stream_assign_stream_id(new_stream);
 
        /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
        if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
@@ -208,31 +215,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
        return new_stream;
 }
 
-/**
- * dc_stream_get_status_from_state - Get stream status from given dc state
- * @state: DC state to find the stream status in
- * @stream: The stream to get the stream status for
- *
- * The given stream is expected to exist in the given dc state. Otherwise, NULL
- * will be returned.
- */
-struct dc_stream_status *dc_stream_get_status_from_state(
-       struct dc_state *state,
-       struct dc_stream_state *stream)
-{
-       uint8_t i;
-
-       if (state == NULL)
-               return NULL;
-
-       for (i = 0; i < state->stream_count; i++) {
-               if (stream == state->streams[i])
-                       return &state->stream_status[i];
-       }
-
-       return NULL;
-}
-
 /**
  * dc_stream_get_status() - Get current stream status of the given stream state
  * @stream: The stream to get the stream status for.
@@ -244,7 +226,7 @@ struct dc_stream_status *dc_stream_get_status(
        struct dc_stream_state *stream)
 {
        struct dc *dc = stream->ctx->dc;
-       return dc_stream_get_status_from_state(dc->current_state, stream);
+       return dc_state_get_stream_status(dc->current_state, stream);
 }
 
 static void program_cursor_attributes(
@@ -465,7 +447,8 @@ bool dc_stream_add_writeback(struct dc *dc,
        if (dc->hwss.enable_writeback) {
                struct dc_stream_status *stream_status = dc_stream_get_status(stream);
                struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
-               dwb->otg_inst = stream_status->primary_otg_inst;
+               if (stream_status)
+                       dwb->otg_inst = stream_status->primary_otg_inst;
        }
 
        if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
index a80e45300783c08bb6df7f0e0667a3cabe1b8b66..19a2c7140ae8437c8f01828837a0fc37dd559ecf 100644 (file)
 #include "transform.h"
 #include "dpp.h"
 
+#include "dc_plane_priv.h"
+
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
 {
        plane_state->ctx = ctx;
 
@@ -63,7 +65,7 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
 
 }
 
-static void dc_plane_destruct(struct dc_plane_state *plane_state)
+void dc_plane_destruct(struct dc_plane_state *plane_state)
 {
        if (plane_state->gamma_correction != NULL) {
                dc_gamma_release(&plane_state->gamma_correction);
index 2c85f8ee682fe770b68a9127e30b541e9846e8a0..5d7aa882416b3435a5dcfbaf502a9f326981bc81 100644 (file)
@@ -27,6 +27,8 @@
 #define DC_INTERFACE_H_
 
 #include "dc_types.h"
+#include "dc_state.h"
+#include "dc_plane.h"
 #include "grph_object_defs.h"
 #include "logger_types.h"
 #include "hdcp_msg_types.h"
@@ -49,7 +51,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.264"
+#define DC_VER "3.2.266"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -461,6 +463,12 @@ enum dml_hostvm_override_opts {
        DML_HOSTVM_OVERRIDE_TRUE = 0x2,
 };
 
+enum dc_replay_power_opts {
+       replay_power_opt_invalid                = 0x0,
+       replay_power_opt_smu_opt_static_screen  = 0x1,
+       replay_power_opt_z10_static_screen      = 0x10,
+};
+
 enum dcc_option {
        DCC_ENABLE = 0,
        DCC_DISABLE = 1,
@@ -979,6 +987,8 @@ struct dc_debug_options {
        unsigned int ips2_eval_delay_us;
        unsigned int ips2_entry_delay_us;
        bool disable_timeout;
+       bool disable_extblankadj;
+       unsigned int static_screen_wait_frames;
 };
 
 struct gpu_info_soc_bounding_box_v1_0;
@@ -1389,13 +1399,6 @@ struct dc_surface_update {
 /*
  * Create a new surface with default parameters;
  */
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
-const struct dc_plane_status *dc_plane_get_status(
-               const struct dc_plane_state *plane_state);
-
-void dc_plane_state_retain(struct dc_plane_state *plane_state);
-void dc_plane_state_release(struct dc_plane_state *plane_state);
-
 void dc_gamma_retain(struct dc_gamma *dc_gamma);
 void dc_gamma_release(struct dc_gamma **dc_gamma);
 struct dc_gamma *dc_create_gamma(void);
@@ -1459,37 +1462,20 @@ enum dc_status dc_validate_global_state(
                struct dc_state *new_ctx,
                bool fast_validate);
 
-
-void dc_resource_state_construct(
-               const struct dc *dc,
-               struct dc_state *dst_ctx);
-
 bool dc_acquire_release_mpc_3dlut(
                struct dc *dc, bool acquire,
                struct dc_stream_state *stream,
                struct dc_3dlut **lut,
                struct dc_transfer_func **shaper);
 
-void dc_resource_state_copy_construct(
-               const struct dc_state *src_ctx,
-               struct dc_state *dst_ctx);
-
-void dc_resource_state_copy_construct_current(
-               const struct dc *dc,
-               struct dc_state *dst_ctx);
-
-void dc_resource_state_destruct(struct dc_state *context);
-
 bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+void get_audio_check(struct audio_info *aud_modes,
+       struct audio_check *aud_chk);
 
 enum dc_status dc_commit_streams(struct dc *dc,
                                 struct dc_stream_state *streams[],
                                 uint8_t stream_count);
 
-struct dc_state *dc_create_state(struct dc *dc);
-struct dc_state *dc_copy_state(struct dc_state *src_ctx);
-void dc_retain_state(struct dc_state *context);
-void dc_release_state(struct dc_state *context);
 
 struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
                struct dc_stream_state *stream,
@@ -2098,6 +2084,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
                const struct dc_stream_state *stream, struct psr_config *psr_config,
                struct psr_context *psr_context);
 
+/*
+ * Communicate with DMUB to allow or disallow Panel Replay on the specified link.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @enable: enable (active) or disable (inactive) Replay
+ * @wait: whether the state transition should wait for the activation to complete
+ * @force_static: force Replay to be disabled (inactive)
+ * @power_opts: power optimization parameters to pass to DMUB
+ *
+ * Return: true if Replay is allowed to become active, false otherwise.
+ */
+bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+               bool wait, bool force_static, const unsigned int *power_opts);
+
 bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
 
 /* On eDP links this function call will stall until T12 has elapsed.
@@ -2193,11 +2193,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
  *
  * @dc: pointer to dc struct
  * @stream: pointer to all possible streams
- * @num_streams: number of valid DPIA streams
+ * @count: number of valid DPIA streams
  *
  * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
  */
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
                const unsigned int count);
 
 /* Sink Interfaces - A sink corresponds to a display output device */
@@ -2342,6 +2342,9 @@ void dc_hardware_release(struct dc *dc);
 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);
 
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
+
+bool dc_set_replay_allow_active(struct dc *dc, bool active);
+
 void dc_z10_restore(const struct dc *dc);
 void dc_z10_save_init(struct dc *dc);
 
index 1a4d615ccdec117b9409e2d2bdee60e7be3b2981..2b79a0e5638e1b757ea3d3527add517db139552e 100644 (file)
@@ -33,6 +33,7 @@
 #include "cursor_reg_cache.h"
 #include "resource.h"
 #include "clk_mgr.h"
+#include "dc_state_priv.h"
 
 #define CTX dc_dmub_srv->ctx
 #define DC_LOGGER CTX->logger
@@ -140,7 +141,10 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
 
                if (status == DMUB_STATUS_QUEUE_FULL) {
                        /* Execute and wait for queue to become empty again. */
-                       dmub_srv_cmd_execute(dmub);
+                       status = dmub_srv_cmd_execute(dmub);
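+                       /* DMCUB is in D3 (powered down); bail out without logging */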
+                       if (status == DMUB_STATUS_POWER_STATE_D3)
+                               return false;
+
                        dmub_srv_wait_for_idle(dmub, 100000);
 
                        /* Requeue the command. */
@@ -148,16 +152,20 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
                }
 
                if (status != DMUB_STATUS_OK) {
-                       DC_ERROR("Error queueing DMUB command: status=%d\n", status);
-                       dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+                       if (status != DMUB_STATUS_POWER_STATE_D3) {
+                               DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+                               dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+                       }
                        return false;
                }
        }
 
        status = dmub_srv_cmd_execute(dmub);
        if (status != DMUB_STATUS_OK) {
-               DC_ERROR("Error starting DMUB execution: status=%d\n", status);
-               dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+               if (status != DMUB_STATUS_POWER_STATE_D3) {
+                       DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+                       dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+               }
                return false;
        }
 
@@ -218,7 +226,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
 
                if (status == DMUB_STATUS_QUEUE_FULL) {
                        /* Execute and wait for queue to become empty again. */
-                       dmub_srv_cmd_execute(dmub);
+                       status = dmub_srv_cmd_execute(dmub);
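+                       /* DMCUB is in D3 (powered down); bail out without logging */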
+                       if (status == DMUB_STATUS_POWER_STATE_D3)
+                               return false;
+
                        dmub_srv_wait_for_idle(dmub, 100000);
 
                        /* Requeue the command. */
@@ -226,16 +237,20 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
                }
 
                if (status != DMUB_STATUS_OK) {
-                       DC_ERROR("Error queueing DMUB command: status=%d\n", status);
-                       dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+                       if (status != DMUB_STATUS_POWER_STATE_D3) {
+                               DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+                               dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+                       }
                        return false;
                }
        }
 
        status = dmub_srv_cmd_execute(dmub);
        if (status != DMUB_STATUS_OK) {
-               DC_ERROR("Error starting DMUB execution: status=%d\n", status);
-               dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+               if (status != DMUB_STATUS_POWER_STATE_D3) {
+                       DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+                       dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+               }
                return false;
        }
 
@@ -287,17 +302,11 @@ bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
 bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
                                    unsigned int stream_mask)
 {
-       struct dmub_srv *dmub;
-       const uint32_t timeout = 30;
-
        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;
 
-       dmub = dc_dmub_srv->dmub;
-
-       return dmub_srv_send_gpint_command(
-                      dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
-                      stream_mask, timeout) == DMUB_STATUS_OK;
+       return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
+                                        stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
@@ -346,7 +355,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
        cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 
        // Send the command to the DMCUB.
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
@@ -360,7 +369,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
        cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 
        // Send the command to the DMCUB.
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
@@ -453,7 +462,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
                sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
 
        // Send the command to the DMCUB.
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -474,7 +483,7 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
        cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);
 
        /* If command was processed, copy feature caps to dmub srv */
-       if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+       if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
            cmd.query_feature_caps.header.ret_status == 0) {
                memcpy(&dc_dmub_srv->dmub->feature_caps,
                       &cmd.query_feature_caps.query_feature_caps_data,
@@ -499,7 +508,7 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
        cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
 
        // If command was processed, copy feature caps to dmub srv
-       if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+       if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
                cmd.visual_confirm_color.header.ret_status == 0) {
                memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
                        &cmd.visual_confirm_color.visual_confirm_color_data,
@@ -510,10 +519,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
 /**
  * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
  *
- * @dc: [in] current dc state
+ * @dc: [in] pointer to dc object
  * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
  * @vblank_pipe: [in] pipe_ctx for the DRR pipe
  * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
+ * @context: [in] DC state for access to phantom stream
  *
  * Populate the DMCUB SubVP command with DRR pipe info. All the information
  * required for calculating the SubVP + DRR microschedule is populated here.
@@ -524,12 +534,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
  * 3. Populate the drr_info with the min and max supported vtotal values
  */
 static void populate_subvp_cmd_drr_info(struct dc *dc,
+               struct dc_state *context,
                struct pipe_ctx *subvp_pipe,
                struct pipe_ctx *vblank_pipe,
                struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
 {
+       struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
        struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
-       struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+       struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
        struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
        uint16_t drr_frame_us = 0;
        uint16_t min_drr_supported_us = 0;
@@ -617,7 +629,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
                        continue;
 
                // Find the SubVP pipe
-               if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+               if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
                        break;
        }
 
@@ -634,7 +646,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
 
        if (vblank_pipe->stream->ignore_msa_timing_param &&
                (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
-               populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data);
+               populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
 }
 
 /**
@@ -659,10 +671,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
        uint32_t subvp0_prefetch_us = 0;
        uint32_t subvp1_prefetch_us = 0;
        uint32_t prefetch_delta_us = 0;
-       struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing;
-       struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
+       struct dc_stream_state *phantom_stream0 = NULL;
+       struct dc_stream_state *phantom_stream1 = NULL;
+       struct dc_crtc_timing *phantom_timing0 = NULL;
+       struct dc_crtc_timing *phantom_timing1 = NULL;
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
 
+       phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
+       phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
+       phantom_timing0 = &phantom_stream0->timing;
+       phantom_timing1 = &phantom_stream1->timing;
+
        subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
                        (uint64_t)phantom_timing0->h_total * 1000000),
                        (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
@@ -712,8 +731,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
        uint32_t j;
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
                        &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
+       struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
        struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
-       struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+       struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
        uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
 
        pipe_data->mode = SUBVP;
@@ -767,7 +787,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
 
-               if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
+               if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
                        pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
                        if (phantom_pipe->bottom_pipe) {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
@@ -801,6 +821,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
        union dmub_rb_cmd cmd;
        struct pipe_ctx *subvp_pipes[2];
        uint32_t wm_val_refclk = 0;
+       enum mall_stream_type pipe_mall_type;
 
        memset(&cmd, 0, sizeof(cmd));
        // FW command for SUBVP
@@ -816,7 +837,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
                 */
                if (resource_is_pipe_type(pipe, OTG_MASTER) &&
                                resource_is_pipe_type(pipe, DPP_PIPE) &&
-                               pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+                               dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
                        subvp_pipes[subvp_count++] = pipe;
        }
 
@@ -824,6 +845,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
                // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
                for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+                       pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
                        if (!pipe->stream)
                                continue;
@@ -834,12 +856,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
                         */
                        if (resource_is_pipe_type(pipe, OTG_MASTER) &&
                                        resource_is_pipe_type(pipe, DPP_PIPE) &&
-                                       pipe->stream->mall_stream_config.paired_stream &&
-                                       pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+                                       pipe_mall_type == SUBVP_MAIN) {
                                populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
                        } else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
                                        resource_is_pipe_type(pipe, DPP_PIPE) &&
-                                       pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                                       pipe_mall_type == SUBVP_NONE) {
                                // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
                                // we run through DML without calculating "natural" P-state support
                                populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -861,7 +882,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
                cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
        }
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
@@ -1098,7 +1119,7 @@ void dc_send_update_cursor_info_to_dmu(
                                pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
 
                /* Combine 2nd cmds update_curosr_info to DMU */
-               dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
+               dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
        }
 }
 
@@ -1112,25 +1133,20 @@ bool dc_dmub_check_min_version(struct dmub_srv *srv)
 void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
 {
        struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
-       struct dmub_srv *dmub;
-       enum dmub_status status;
-       static const uint32_t timeout_us = 30;
 
        if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
                DC_LOG_ERROR("%s: invalid parameters.", __func__);
                return;
        }
 
-       dmub = dc_dmub_srv->dmub;
-
-       status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us);
-       if (status != DMUB_STATUS_OK) {
+       if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
+                                      0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
                DC_LOG_ERROR("timeout updating trace buffer mask word\n");
                return;
        }
 
-       status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us);
-       if (status != DMUB_STATUS_OK) {
+       if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
+                                      0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
                DC_LOG_ERROR("timeout updating trace buffer mask word\n");
                return;
        }
@@ -1148,6 +1164,9 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
        enum dmub_status status;
 
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return true;
+
        if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
                return true;
 
@@ -1169,7 +1188,7 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
        return true;
 }
 
-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
+static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
 {
        union dmub_rb_cmd cmd = {0};
 
@@ -1190,20 +1209,20 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
                        dc->hwss.set_idle_state(dc, true);
        }
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       /* NOTE: This does not use the "wake" interface since this is part of the wake path. */
+       /* We also do not perform a wait since DMCUB could enter idle after the notification. */
+       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 }
 
-void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 {
-       const uint32_t max_num_polls = 10000;
        uint32_t allow_state = 0;
        uint32_t commit_state = 0;
-       int i;
 
        if (dc->debug.dmcub_emulation)
                return;
 
-       if (!dc->idle_optimizations_allowed)
+       if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
                return;
 
        if (dc->hwss.get_idle_state &&
@@ -1215,8 +1234,16 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 
                if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
                        // Wait for evaluation time
-                       udelay(dc->debug.ips2_eval_delay_us);
-                       commit_state = dc->hwss.get_idle_state(dc);
+                       for (;;) {
+                               udelay(dc->debug.ips2_eval_delay_us);
+                               commit_state = dc->hwss.get_idle_state(dc);
+                               if (commit_state & DMUB_IPS2_ALLOW_MASK)
+                                       break;
+
+                               /* allow was still set, retry eval delay */
+                               dc->hwss.set_idle_state(dc, false);
+                       }
+
                        if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
                                // Tell PMFW to exit low power state
                                dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@@ -1225,17 +1252,13 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
                                udelay(dc->debug.ips2_entry_delay_us);
                                dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
 
-                               for (i = 0; i < max_num_polls; ++i) {
+                               for (;;) {
                                        commit_state = dc->hwss.get_idle_state(dc);
                                        if (commit_state & DMUB_IPS2_COMMIT_MASK)
                                                break;
 
                                        udelay(1);
-
-                                       if (dc->debug.disable_timeout)
-                                               i--;
                                }
-                               ASSERT(i < max_num_polls);
 
                                if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
                                        ASSERT(0);
@@ -1250,17 +1273,13 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 
                dc_dmub_srv_notify_idle(dc, false);
                if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
-                       for (i = 0; i < max_num_polls; ++i) {
+                       for (;;) {
                                commit_state = dc->hwss.get_idle_state(dc);
                                if (commit_state & DMUB_IPS1_COMMIT_MASK)
                                        break;
 
                                udelay(1);
-
-                               if (dc->debug.disable_timeout)
-                                       i--;
                        }
-                       ASSERT(i < max_num_polls);
                }
        }
 
@@ -1282,3 +1301,117 @@ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_c
        else
                dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
 }
+
+void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
+{
+       struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return;
+
+       if (dc_dmub_srv->idle_allowed == allow_idle)
+               return;
+
+       /*
+        * Entering a low power state requires a driver notification.
+        * Powering up the hardware requires notifying PMFW and DMCUB.
+        * Clearing the driver idle allow requires a DMCUB command.
+        * DMCUB commands require the DMCUB to be powered up and restored.
+        *
+        * Exit early to prevent an infinite loop of DMCUB commands
+        * triggering exit low power; use software state to track this.
+        */
+       dc_dmub_srv->idle_allowed = allow_idle;
+
+       if (!allow_idle)
+               dc_dmub_srv_exit_low_power_state(dc);
+       else
+               dc_dmub_srv_notify_idle(dc, allow_idle);
+}
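A quick usage sketch for reviewers (example_access_dmcub() is hypothetical and not part of this patch; only the dc_dmub_srv_apply_idle_power_optimizations() calls are):

/* Hypothetical caller: bracket direct DMCUB access with the new toggle. */
static void example_access_dmcub(struct dc *dc)
{
        /* Exits idle (wakes DMCUB and notifies PMFW) if idle was allowed. */
        dc_dmub_srv_apply_idle_power_optimizations(dc, false);

        /* ... DMCUB registers and commands are safe to use here ... */

        /* Re-allows idle; a no-op if the software state did not change. */
        dc_dmub_srv_apply_idle_power_optimizations(dc, true);
}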
+
+bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+                                 enum dm_dmub_wait_type wait_type)
+{
+       return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
+}
+
+bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
+                                      union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+       struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+       bool result = false, reallow_idle = false;
+
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return false;
+
+       if (count == 0)
+               return true;
+
+       if (dc_dmub_srv->idle_allowed) {
+               dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
+               reallow_idle = true;
+       }
+
+       /*
+        * These may have different implementations in DM, so ensure
+        * that we guide it to the expected helper.
+        */
+       if (count > 1)
+               result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
+       else
+               result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
+
+       if (result && reallow_idle)
+               dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
+
+       return result;
+}
+
+static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+                                 uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
+{
+       struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+       const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
+       enum dmub_status status;
+
+       if (response)
+               *response = 0;
+
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return false;
+
+       status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
+       if (status != DMUB_STATUS_OK) {
+               if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
+                       return true;
+
+               return false;
+       }
+
+       if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+               dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
+
+       return true;
+}
+
+bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+                              uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
+{
+       struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+       bool result = false, reallow_idle = false;
+
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return false;
+
+       if (dc_dmub_srv->idle_allowed) {
+               dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
+               reallow_idle = true;
+       }
+
+       result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
+
+       if (result && reallow_idle)
+               dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
+
+       return result;
+}
index c25ce7546f718679196b5b9c0b4c0f707755e876..952bfb368886e3adfc371afd671298f741024daf 100644
@@ -50,6 +50,8 @@ struct dc_dmub_srv {
 
        struct dc_context *ctx;
        void *dm;
+
+       bool idle_allowed;
 };
 
 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
@@ -100,8 +102,59 @@ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
 void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index);
 
 bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle);
-void dc_dmub_srv_exit_low_power_state(const struct dc *dc);
+
+void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
 
 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
+
+/**
+ * dc_wake_and_execute_dmub_cmd() - Wrapper for DMUB command execution.
+ *
+ * Refer to dc_wake_and_execute_dmub_cmd_list() for usage and limitations;
+ * this function is a convenience wrapper for single-command execution.
+ *
+ * @ctx: DC context
+ * @cmd: The command to send/receive
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+                                 enum dm_dmub_wait_type wait_type);
+
+/**
+ * dc_wake_and_execute_dmub_cmd_list() - Wrapper for DMUB command list execution.
+ *
+ * If the DMCUB hardware was asleep then it wakes the DMUB before
+ * executing the command and attempts to re-enter idle if the command
+ * submission was successful.
+ *
+ * This should be the preferred command submission interface provided
+ * the DC lock is acquired.
+ *
+ * Otherwise, entry/exit of idle power optimizations must be performed
+ * manually through dc_allow_idle_optimizations().
+ *
+ * @ctx: DC context
+ * @count: Number of commands to send/receive
+ * @cmd: Array of commands to send
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
+                                      union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
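For illustration, a hypothetical caller of the single-command wrapper (DMUB_CMD__HW_LOCK and the lock_hw payload are existing command definitions; the helper itself is not part of this patch):

/* Hypothetical caller: submit one command through the wake-aware path. */
static bool example_submit(struct dc_context *ctx)
{
        union dmub_rb_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
        cmd.lock_hw.header.payload_bytes = sizeof(cmd.lock_hw.lock_hw_data);

        /* Exits idle if needed, executes, then re-enters idle on success. */
        return dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

dc_wake_and_execute_dmub_cmd_list() behaves the same way but routes count commands through dm_execute_dmub_cmd_list().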
+
+/**
+ * dc_wake_and_execute_gpint() - Wrapper for DMUB GPINT command execution.
+ *
+ * @ctx: DC context
+ * @command_code: The command ID to send to DMCUB
+ * @param: The parameter to message DMCUB
+ * @response: Optional response out value; may be NULL
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+                              uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type);
+
 #endif /* _DMUB_DC_SRV_H_ */
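And a matching sketch for the GPINT path (the caller is hypothetical; DMUB_GPINT__GET_PSR_STATE is the same command converted in dmub_psr.c further down):

/* Hypothetical caller: wake-aware GPINT with a reply. */
static uint32_t example_get_psr_state(struct dc_context *ctx, uint8_t panel_inst)
{
        uint32_t raw_state = 0;

        if (!dc_wake_and_execute_gpint(ctx, DMUB_GPINT__GET_PSR_STATE,
                                       panel_inst, &raw_state,
                                       DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
                return 0; /* timeout: no valid state */

        return raw_state;
}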
index eeeeeef4d717345e85296ffcc8fc28409909978b..1cb7765f593aa679905fc83ec4a441b076129be1 100644
@@ -1377,6 +1377,12 @@ struct dp_trace {
 #ifndef DP_TUNNELING_STATUS
 #define DP_TUNNELING_STATUS                            0xE0025 /* 1.4a */
 #endif
+#ifndef DP_TUNNELING_MAX_LINK_RATE
+#define DP_TUNNELING_MAX_LINK_RATE                     0xE0028 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_MAX_LANE_COUNT
+#define DP_TUNNELING_MAX_LANE_COUNT                    0xE0029 /* 1.4a */
+#endif
 #ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
 #define DPTX_BW_ALLOCATION_MODE_CONTROL                        0xE0030 /* 1.4a */
 #endif
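These registers are read over AUX; a sketch using the standard drm_dp_dpcd_readb() helper (the function below is hypothetical and error handling is elided):

/* Hypothetical reader for the new DP tunneling capability registers. */
static void example_read_tunneling_caps(struct drm_dp_aux *aux)
{
        u8 max_link_rate = 0, max_lane_count = 0;

        drm_dp_dpcd_readb(aux, DP_TUNNELING_MAX_LINK_RATE, &max_link_rate);
        drm_dp_dpcd_readb(aux, DP_TUNNELING_MAX_LANE_COUNT, &max_lane_count);
        /* Field masking per the DP 1.4a spec elided. */
}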
index cb6eaddab72056196af376a1421a23517133afce..8f9a678256150bdefcc56a360ba2cdfda05515be 100644
@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
        cmd_buf->header.payload_bytes =
                        sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
 
-       dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+       dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
        memset(cmd_buf, 0, sizeof(*cmd_buf));
 
@@ -67,7 +67,7 @@ static inline void submit_dmub_burst_write(
        cmd_buf->header.payload_bytes =
                        sizeof(uint32_t) * offload->reg_seq_count;
 
-       dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+       dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
        memset(cmd_buf, 0, sizeof(*cmd_buf));
 
@@ -80,7 +80,7 @@ static inline void submit_dmub_reg_wait(
 {
        struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
 
-       dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+       dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
        memset(cmd_buf, 0, sizeof(*cmd_buf));
        offload->reg_seq_count = 0;
index e2a3aa8812df496e7286e9fcd2601ae27767e487..811474f4419bd264a7774676542e09a4f2e1f328 100644
@@ -244,7 +244,7 @@ enum pixel_format {
 #define DC_MAX_DIRTY_RECTS 3
 struct dc_flip_addrs {
        struct dc_plane_address address;
-       unsigned int flip_timestamp_in_us;
+       unsigned long long flip_timestamp_in_us;
        bool flip_immediate;
        /* TODO: add flip duration for FreeSync */
        bool triplebuffer_flips;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
new file mode 100644
index 0000000..ef380ca
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_H_
+#define _DC_PLANE_H_
+
+#include "dc.h"
+#include "dc_hw_types.h"
+
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+               const struct dc_plane_state *plane_state);
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
new file mode 100644
index 0000000..9ee184c
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_PRIV_H_
+#define _DC_PLANE_PRIV_H_
+
+#include "dc_plane.h"
+
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
+void dc_plane_destruct(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
new file mode 100644
index 0000000..d167fdb
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_H_
+#define _DC_STATE_H_
+
+#include "dc.h"
+#include "inc/core_status.h"
+
+struct dc_state *dc_state_create(struct dc *dc);
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
+struct dc_state *dc_state_create_copy(struct dc_state *src_state);
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
+struct dc_state *dc_state_create_current_copy(struct dc *dc);
+void dc_state_construct(struct dc *dc, struct dc_state *state);
+void dc_state_destruct(struct dc_state *state);
+void dc_state_retain(struct dc_state *state);
+void dc_state_release(struct dc_state *state);
+
+enum dc_status dc_state_add_stream(struct dc *dc,
+                                   struct dc_state *state,
+                                   struct dc_stream_state *stream);
+
+enum dc_status dc_state_remove_stream(
+               struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *stream);
+
+bool dc_state_add_plane(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_plane_state *plane_state,
+               struct dc_state *state);
+
+bool dc_state_remove_plane(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_plane_state *plane_state,
+               struct dc_state *state);
+
+bool dc_state_rem_all_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_state *state);
+
+bool dc_state_add_all_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *stream,
+               struct dc_plane_state * const *plane_states,
+               int plane_count,
+               struct dc_state *state);
+
+struct dc_stream_status *dc_state_get_stream_status(
+       struct dc_state *state,
+       struct dc_stream_state *stream);
+#endif /* _DC_STATE_H_ */
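The new header makes state lifetime explicit; a hypothetical create/add/release sequence under this interface (DC_OK comes from core_status.h):

/* Hypothetical lifecycle sketch for the new dc_state interface. */
static void example_state_lifecycle(struct dc *dc, struct dc_stream_state *stream)
{
        struct dc_state *state = dc_state_create_current_copy(dc);

        if (!state)
                return;

        if (dc_state_add_stream(dc, state, stream) == DC_OK) {
                /* ... validate and commit the modified state ... */
        }

        dc_state_release(state);
}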
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
new file mode 100644
index 0000000..c1f44e0
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_PRIV_H_
+#define _DC_STATE_PRIV_H_
+
+#include "dc_state.h"
+#include "dc_stream.h"
+
+/* Get the type of the provided resource (none, phantom, main) based on the provided
+ * context. If the context is unavailable, determine only if phantom or not.
+ */
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+               const struct pipe_ctx *pipe_ctx);
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+               const struct dc_stream_state *stream);
+
+/* Gets the phantom stream if main is provided, gets the main if phantom is provided. */
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+               const struct dc_stream_state *stream);
+
+/* Allocates a phantom stream or plane and returns a pointer to the object */
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *main_stream);
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+               struct dc_state *state,
+               struct dc_plane_state *main_plane);
+
+/* Deallocates a phantom stream or plane */
+void dc_state_release_phantom_stream(const struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *phantom_stream);
+void dc_state_release_phantom_plane(const struct dc *dc,
+               struct dc_state *state,
+               struct dc_plane_state *phantom_plane);
+
+/* Add/remove a phantom stream to/from the context and generate SubVP metadata */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *phantom_stream,
+               struct dc_stream_state *main_stream);
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *phantom_stream);
+
+bool dc_state_add_phantom_plane(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_plane_state *phantom_plane,
+               struct dc_state *state);
+
+bool dc_state_remove_phantom_plane(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_plane_state *phantom_plane,
+               struct dc_state *state);
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_state *state,
+               bool should_release_planes);
+
+bool dc_state_add_all_phantom_planes_for_stream(
+               const struct dc *dc,
+               struct dc_stream_state *phantom_stream,
+               struct dc_plane_state * const *phantom_planes,
+               int plane_count,
+               struct dc_state *state);
+
+bool dc_state_remove_phantom_streams_and_planes(
+               struct dc *dc,
+               struct dc_state *state);
+
+void dc_state_release_phantom_streams_and_planes(
+               struct dc *dc,
+               struct dc_state *state);
+
+#endif /* _DC_STATE_PRIV_H_ */
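These private helpers replace the open-coded mall_stream_config checks converted earlier in this series; a hypothetical consumer:

/* Hypothetical sketch: classify a pipe and resolve its phantom pairing. */
static struct dc_stream_state *example_get_phantom(const struct dc_state *state,
                                                   const struct pipe_ctx *pipe)
{
        if (dc_state_get_pipe_subvp_type(state, pipe) != SUBVP_MAIN)
                return NULL;

        return dc_state_get_paired_subvp_stream(state, pipe->stream);
}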
index 4ac48c346a3399d510844195fdee3897a1999029..ee10941caa5980999044407184e7a41b8548e6b0 100644
@@ -38,6 +38,14 @@ struct timing_sync_info {
        bool master;
 };
 
+struct mall_stream_config {
+       /* MALL stream config to indicate if the stream is phantom or not.
+        * We will use a phantom stream to indicate that the pipe is phantom.
+        */
+       enum mall_stream_type type;
+       struct dc_stream_state *paired_stream;  // master / slave stream
+};
+
 struct dc_stream_status {
        int primary_otg_inst;
        int stream_enc_inst;
@@ -50,6 +58,7 @@ struct dc_stream_status {
        struct timing_sync_info timing_sync_info;
        struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
        bool is_abm_supported;
+       struct mall_stream_config mall_stream_config;
 };
 
 enum hubp_dmdata_mode {
@@ -147,31 +156,6 @@ struct test_pattern {
 
 #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
 
-enum mall_stream_type {
-       SUBVP_NONE, // subvp not in use
-       SUBVP_MAIN, // subvp in use, this stream is main stream
-       SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
-};
-
-struct mall_stream_config {
-       /* MALL stream config to indicate if the stream is phantom or not.
-        * We will use a phantom stream to indicate that the pipe is phantom.
-        */
-       enum mall_stream_type type;
-       struct dc_stream_state *paired_stream;  // master / slave stream
-};
-
-/* Temp struct used to save and restore MALL config
- * during validation.
- *
- * TODO: Move MALL config into dc_state instead of stream struct
- * to avoid needing to save/restore.
- */
-struct mall_temp_config {
-       struct mall_stream_config mall_stream_config[MAX_PIPES];
-       bool is_phantom_plane[MAX_PIPES];
-};
-
 struct dc_stream_debug_options {
        char force_odm_combine_segments;
 };
@@ -301,7 +285,7 @@ struct dc_stream_state {
        bool has_non_synchronizable_pclk;
        bool vblank_synchronized;
        bool fpo_in_use;
-       struct mall_stream_config mall_stream_config;
+       bool is_phantom;
 };
 
 #define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -415,41 +399,6 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
                                  uint32_t *h_position,
                                  uint32_t *v_position);
 
-enum dc_status dc_add_stream_to_ctx(
-                       struct dc *dc,
-               struct dc_state *new_ctx,
-               struct dc_stream_state *stream);
-
-enum dc_status dc_remove_stream_from_ctx(
-               struct dc *dc,
-                       struct dc_state *new_ctx,
-                       struct dc_stream_state *stream);
-
-
-bool dc_add_plane_to_context(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_plane_state *plane_state,
-               struct dc_state *context);
-
-bool dc_remove_plane_from_context(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_plane_state *plane_state,
-               struct dc_state *context);
-
-bool dc_rem_all_planes_for_stream(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_state *context);
-
-bool dc_add_all_planes_for_stream(
-               const struct dc *dc,
-               struct dc_stream_state *stream,
-               struct dc_plane_state * const *plane_states,
-               int plane_count,
-               struct dc_state *context);
-
 bool dc_stream_add_writeback(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_writeback_info *wb_info);
@@ -518,9 +467,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
 void dc_stream_retain(struct dc_stream_state *dc_stream);
 void dc_stream_release(struct dc_stream_state *dc_stream);
 
-struct dc_stream_status *dc_stream_get_status_from_state(
-       struct dc_state *state,
-       struct dc_stream_state *stream);
 struct dc_stream_status *dc_stream_get_status(
        struct dc_stream_state *dc_stream);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
new file mode 100644
index 0000000..7476fd5
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STREAM_PRIV_H_
+#define _DC_STREAM_PRIV_H_
+
+#include "dc_stream.h"
+
+bool dc_stream_construct(struct dc_stream_state *stream,
+       struct dc_sink *dc_sink_data);
+void dc_stream_destruct(struct dc_stream_state *stream);
+
+void dc_stream_assign_stream_id(struct dc_stream_state *stream);
+
+#endif // _DC_STREAM_PRIV_H_
index 7313cfe69498c0086b2f5c25f36e0540250d904e..b08ccb8c68bc366386e82a566c452459da0aabdc 100644
@@ -1140,25 +1140,34 @@ struct dc_panel_config {
        } ilr;
 };
 
+#define MAX_SINKS_PER_LINK 4
+
 /*
  *  USB4 DPIA BW ALLOCATION STRUCTS
  */
 struct dc_dpia_bw_alloc {
-       int sink_verified_bw;  // The Verified BW that sink can allocated and use that has been verified already
-       int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
-       int sink_max_bw;       // The Max BW that sink can require/support
+       int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
+       int link_verified_bw;  // The verified BW that the link can allocate and use (already verified)
+       int link_max_bw;       // The max BW that the link can request/support
+       int allocated_bw;      // The Actual Allocated BW for this DPIA
        int estimated_bw;      // The estimated available BW for this DPIA
        int bw_granularity;    // BW Granularity
+       int dp_overhead;       // DP overhead in dp tunneling
        bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3:  DP-Tx & Dpia & CM
        bool response_ready;   // Response ready from the CM side
+       uint8_t nrd_max_lane_count; // Non-reduced max lane count
+       uint8_t nrd_max_link_rate; // Non-reduced max link rate
 };
 
-#define MAX_SINKS_PER_LINK 4
-
 enum dc_hpd_enable_select {
        HPD_EN_FOR_ALL_EDP = 0,
        HPD_EN_FOR_PRIMARY_EDP_ONLY,
        HPD_EN_FOR_SECONDARY_EDP_ONLY,
 };
 
+enum mall_stream_type {
+       SUBVP_NONE, // subvp not in use
+       SUBVP_MAIN, // subvp in use, this stream is main stream
+       SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
+};
 #endif /* DC_TYPES_H_ */
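A sketch of how the reworked dc_dpia_bw_alloc accounting might be consumed (the helper and the fit test are assumptions drawn from the field comments above, not part of this patch):

/* Hypothetical check: can a new request be granted from the estimate? */
static bool example_request_fits(const struct dc_dpia_bw_alloc *alloc, int req_bw)
{
        if (!alloc->bw_alloc_enabled)
                return false;

        /* The DP tunneling overhead comes out of the same estimated pool. */
        return req_bw + alloc->dp_overhead <= alloc->estimated_bw;
}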
index 874b132fe1d782f330353c628bf892da03f4618c..a6006776333d12f2da868c9699a1dd350ed803d5 100644
@@ -135,7 +135,7 @@ static void dmcu_set_backlight_level(
                        0, 1, 80000);
 }
 
-static void dce_abm_init(struct abm *abm, uint32_t backlight)
+static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
@@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight)
                        BL1_PWM_TARGET_ABM_LEVEL, backlight);
 
        REG_UPDATE(BL1_PWM_USER_LEVEL,
-                       BL1_PWM_USER_LEVEL, backlight);
+                       BL1_PWM_USER_LEVEL, user_level);
 
        REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
                        ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
index 140598f18bbdd4cb4758ec7e9ec17c91286d0ecc..f0458b8f00af842b87ab91feadd71eef4c680e27 100644
@@ -782,7 +782,7 @@ static void get_azalia_clock_info_dp(
        /*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
         *  [khz] ->[100Hz] */
        azalia_clock_info->audio_dto_module =
-               pll_info->dp_dto_source_clock_in_khz * 10;
+               pll_info->audio_dto_source_clock_in_khz * 10;
 }
 
 void dce_aud_wall_dto_setup(
index 5d3f6fa1011e8e33f5e7772bee445cb6602e278d..970644b695cd4f1d96f166cc1786987b460cdafd 100644
@@ -975,6 +975,9 @@ static bool dcn31_program_pix_clk(
                        look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
        struct bp_pixel_clock_parameters bp_pc_params = {0};
        enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
+
+       if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+               dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
        // For these signal types Driver to program DP_DTO without calling VBIOS Command table
        if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
                if (e) {
@@ -1088,6 +1091,10 @@ static bool get_pixel_clk_frequency_100hz(
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
        unsigned int clock_hz = 0;
        unsigned int modulo_hz = 0;
+       unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
+
+       if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+               dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
 
        if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
                clock_hz = REG_READ(PHASE[inst]);
@@ -1100,7 +1107,7 @@ static bool get_pixel_clk_frequency_100hz(
                        modulo_hz = REG_READ(MODULO[inst]);
                        if (modulo_hz)
                                *pixel_clk_khz = div_u64((uint64_t)clock_hz*
-                                       clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+                                       dp_dto_ref_khz*10,
                                        modulo_hz);
                        else
                                *pixel_clk_khz = 0;
index 8c5e7f858be38a69f2ad5e4ea8e3937541e9d045..ccc154b0281c2c9274f7e28af06ab03288d592f9 100644
@@ -57,9 +57,9 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst
        return ret;
 }
 
-static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight)
+static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level)
 {
-       dmub_abm_init(abm, backlight);
+       dmub_abm_init(abm, backlight, user_level);
 }
 
 static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm)
index 42c802afc4681b5e2e93cf41c8718b468643115e..f9d6a181164aac70024480550a56e125cd96b167 100644
@@ -76,10 +76,10 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
        cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
        cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
-void dmub_abm_init(struct abm *abm, uint32_t backlight)
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
 {
        struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
 
@@ -106,7 +106,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight)
                        BL1_PWM_TARGET_ABM_LEVEL, backlight);
 
        REG_UPDATE(BL1_PWM_USER_LEVEL,
-                       BL1_PWM_USER_LEVEL, backlight);
+                       BL1_PWM_USER_LEVEL, user_level);
 
        REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
                        ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
@@ -155,7 +155,7 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask)
        cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
        cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -186,7 +186,7 @@ void dmub_abm_init_config(struct abm *abm,
 
        cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 }
 
@@ -203,7 +203,7 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
        cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
        cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -246,7 +246,7 @@ bool dmub_abm_save_restore(
 
        cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        // Copy iramtable data into local structure
        memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
@@ -274,7 +274,7 @@ bool dmub_abm_set_pipe(struct abm *abm,
        cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
        cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -296,7 +296,7 @@ bool dmub_abm_set_backlight_level(struct abm *abm,
        cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
        cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
index 07ea6c8d414f3baa5ec64d3eb57a162796db5004..761685e5b8c91eabb4d5a5ffb11c8d4299234ccc 100644
@@ -30,7 +30,7 @@
 
 struct abm_save_restore;
 
-void dmub_abm_init(struct abm *abm, uint32_t backlight);
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level);
 bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
 unsigned int dmub_abm_get_current_backlight(struct abm *abm);
 unsigned int dmub_abm_get_target_backlight(struct abm *abm);
index 2aa0e01a6891b07e00143d7e803774a822616abf..ba1fec3016d5ba98742cbe184200b626f25bea1a 100644
@@ -47,7 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
        if (!lock)
                cmd.lock_hw.lock_hw_data.should_release = 1;
 
-       dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
index d8009b2dc56a068ae42e088b52cacbf4f9b0f053..98a778996e1a9176e5ab3f136ce890ecddbcb6aa 100644
@@ -48,5 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
                sizeof(cmd.outbox1_enable.header);
        cmd.outbox1_enable.enable = true;
 
-       dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
index 9d4170a356a207bb1c4189bab591924fcec078af..3e243e407bb87ec6934eea4899baa78cc0992423 100644
@@ -105,23 +105,18 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
  */
 static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state, uint8_t panel_inst)
 {
-       struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
        uint32_t raw_state = 0;
        uint32_t retry_count = 0;
-       enum dmub_status status;
 
        do {
                // Send gpint command and wait for ack
-               status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, panel_inst, 30);
-
-               if (status == DMUB_STATUS_OK) {
-                       // GPINT was executed, get response
-                       dmub_srv_get_gpint_response(srv, &raw_state);
+               if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__GET_PSR_STATE, panel_inst, &raw_state,
+                                             DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
                        *state = convert_psr_state(raw_state);
-               } else
+               } else {
                        // Return invalid state when GPINT times out
                        *state = PSR_STATE_INVALID;
-
+               }
        } while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
 
        // Assert if max retry hit
@@ -171,7 +166,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
        cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
        cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -199,7 +194,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
 
        cmd.psr_enable.header.payload_bytes = 0; // Send header only
 
-       dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        /* Below loops 1000 x 500us = 500 ms.
         *  Exit PSR may need to wait 1-2 frames to power up. Timeout after at
@@ -248,7 +243,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
        cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
        cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
        cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -267,7 +262,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub,
        cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle;
        cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su;
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -286,7 +281,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
        cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
        cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -423,7 +418,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
                copy_settings_data->relock_delay_frame_cnt = 2;
        copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -444,7 +439,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
        cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
        cmd.psr_enable.header.payload_bytes = 0;
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -452,13 +447,11 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
  */
 static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst)
 {
-       struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
        uint16_t param = (uint16_t)(panel_inst << 8);
 
        /* Send gpint command and wait for ack */
-       dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, param, 30);
-
-       dmub_srv_get_gpint_response(srv, residency);
+       dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__PSR_RESIDENCY, param, residency,
+                                 DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
 }
 
 static const struct dmub_psr_funcs psr_funcs = {
index 28149e53c2a68fb2b956c9415cc258b0ca63d826..38e4797e9476ca8de7ba6ad92c6906db5f9823c1 100644
@@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
                *residency = 0;
 }
 
+/**
+ * dmub_replay_set_power_opt_and_coasting_vtotal() - Set REPLAY power optimization flags and coasting vtotal.
+ */
+static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
+               unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *dc = dmub->ctx;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
+       cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type =
+               DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
+       cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes =
+               sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+       cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt;
+       cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst;
+       cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/**
+ * dmub_replay_send_cmd() - Send a general Replay command to DMUB.
+ */
+static void dmub_replay_send_cmd(struct dmub_replay *dmub,
+               enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element)
+{
+       union dmub_rb_cmd cmd;
+       struct dc_context *ctx;
+
+       if (dmub == NULL || cmd_element == NULL)
+               return;
+
+       ctx = dmub->ctx;
+       if (ctx == NULL || msg == Replay_Msg_Not_Support)
+               return;
+
+       memset(&cmd, 0, sizeof(cmd));
+       //Header
+       cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY;
+
+       switch (msg) {
+       case Replay_Set_Timing_Sync_Supported:
+               //Header
+               cmd.replay_set_timing_sync.header.sub_type =
+                       DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
+               cmd.replay_set_timing_sync.header.payload_bytes =
+                       sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+               //Cmd Body
+               cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
+                                               cmd_element->sync_data.panel_inst;
+               cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported =
+                                               cmd_element->sync_data.timing_sync_supported;
+               break;
+       case Replay_Set_Residency_Frameupdate_Timer:
+               //Header
+               cmd.replay_set_frameupdate_timer.header.sub_type =
+                       DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
+               cmd.replay_set_frameupdate_timer.header.payload_bytes =
+                       sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+               //Cmd Body
+               cmd.replay_set_frameupdate_timer.data.panel_inst =
+                                               cmd_element->panel_inst;
+               cmd.replay_set_frameupdate_timer.data.enable =
+                                               cmd_element->timer_data.enable;
+               cmd.replay_set_frameupdate_timer.data.frameupdate_count =
+                                               cmd_element->timer_data.frameupdate_count;
+               break;
+       case Replay_Msg_Not_Support:
+       default:
+               return;
+       }
+
+       dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
 static const struct dmub_replay_funcs replay_funcs = {
-       .replay_copy_settings           = dmub_replay_copy_settings,
-       .replay_enable                  = dmub_replay_enable,
-       .replay_get_state               = dmub_replay_get_state,
-       .replay_set_power_opt           = dmub_replay_set_power_opt,
-       .replay_set_coasting_vtotal     = dmub_replay_set_coasting_vtotal,
-       .replay_residency               = dmub_replay_residency,
+       .replay_copy_settings                           = dmub_replay_copy_settings,
+       .replay_enable                                  = dmub_replay_enable,
+       .replay_get_state                               = dmub_replay_get_state,
+       .replay_set_power_opt                           = dmub_replay_set_power_opt,
+       .replay_set_coasting_vtotal                     = dmub_replay_set_coasting_vtotal,
+       .replay_residency                               = dmub_replay_residency,
+       .replay_set_power_opt_and_coasting_vtotal       = dmub_replay_set_power_opt_and_coasting_vtotal,
+       .replay_send_cmd                                = dmub_replay_send_cmd,
 };
 
 /*
index b3ee90a0b8b3d28e3115ff4497a20098d9369670..3613aff994d725362dabd5e5cbb75cfc576d1a52 100644
@@ -51,6 +51,8 @@ struct dmub_replay_funcs {
                uint8_t panel_inst);
        void (*replay_residency)(struct dmub_replay *dmub,
                uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+       void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
+               unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal);
 };
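Consumers reach the new entry point through the funcs table; a hypothetical call site (assumes struct dmub_replay carries a funcs pointer, matching the dmub_psr pattern):

/* Hypothetical caller: combined power-opt and coasting vtotal update. */
static void example_replay_update(struct dmub_replay *replay,
                                  unsigned int power_opt, uint8_t panel_inst,
                                  uint16_t coasting_vtotal)
{
        if (replay->funcs->replay_set_power_opt_and_coasting_vtotal)
                replay->funcs->replay_set_power_opt_and_coasting_vtotal(
                        replay, power_opt, panel_inst, coasting_vtotal);
}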
 
 struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
index 0dd62934a18ce9497dcaadcfc2a3b3364fd52670..ae6a131be71b6bb2676c7c13556c371af7bb4760 100644
@@ -22,7 +22,7 @@
 #
 # Makefile for DCN.
 
-DCN10 = dcn10_init.o dcn10_ipp.o \
+DCN10 = dcn10_ipp.o \
                dcn10_hw_sequencer_debug.o \
                dcn10_dpp.o dcn10_opp.o \
                dcn10_hubp.o dcn10_mpc.o \
index bd760442ff8935dbb31d8fe69ecf348c34927b42..3dae3943b056c44aaedaa9c90aae42d8f5ac781d 100644
@@ -2,7 +2,7 @@
 #
 # Makefile for DCN.
 
-DCN20 = dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
                dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
                dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
                dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
index a101e651155500a977c4138f7050f6d83f65520b..2b0b4f32e13bf84e74c572778c5dd4fd2564e853 100644
@@ -1,8 +1,7 @@
 # SPDX-License-Identifier: MIT
 #
 # Makefile for DCN.
-DCN201 = dcn201_init.o \
-       dcn201_hubbub.o\
+DCN201 = dcn201_hubbub.o\
        dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
        dcn201_dccg.o dcn201_link_encoder.o
 
index dd1eea7212f47058530c2cbbed849c4c85b1884c..ca92f5c8e7fb70e7a37918d43ee552185c796c26 100644
@@ -2,7 +2,7 @@
 #
 # Makefile for DCN21.
 
-DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o \
+DCN21 = dcn21_hubp.o dcn21_hubbub.o \
         dcn21_link_encoder.o dcn21_dccg.o
 
 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
index 68cad55c72ab8c6096379c03f22f6666ec085124..e13d69a22c1c7fc91fe69696408a46951deb1ceb 100644
@@ -691,7 +691,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp,
        cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
 
        PERF_TRACE();  // TODO: remove after performance is stable.
-       dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
        PERF_TRACE();  // TODO: remove after performance is stable.
 }
 
index cd95f322235e9e47ce2d55dec3fdd6a1d9f52d37..b5b2aa3b3783972a5b81263769f3c6c00e7c1144 100644
@@ -23,9 +23,7 @@
 #
 #
 
-DCN30 := \
-       dcn30_init.o \
-       dcn30_hubbub.o \
+DCN30 := dcn30_hubbub.o \
        dcn30_hubp.o \
        dcn30_dpp.o \
        dcn30_dccg.o \
index 090011300dcdb02a3395baae22b1760021469269..d241f665e40ac4251bbc9bdb97419015ffa6db9a 100644
@@ -10,7 +10,7 @@
 #
 # Makefile for dcn30.
 
-DCN301 = dcn301_init.o dcn301_dccg.o \
+DCN301 = dcn301_dccg.o \
                dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
deleted file mode 100644
index 0fcd035..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-#  Authors: AMD
-#
-# Makefile for dcn302.
-
-DCN3_02 = dcn302_init.o
-
-AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02)
index 11a2662e58ef344a1764583ffe32d73dd902444e..5d93ac16c03a9481ac4c25109c9c3b2d6a9b5639 100644
@@ -10,7 +10,7 @@
 #
 # Makefile for dcn31.
 
-DCN31 = dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \
+DCN31 = dcn31_hubbub.o dcn31_hubp.o \
        dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
        dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
        dcn31_afmt.o dcn31_vpg.o
index 4596f3bac1b4c7654974c75bdc5bae9a32f3ab2d..26be5fee7411d9db79d7fe3a3117082675588e31 100644
@@ -125,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
        cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
        cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
 
-       if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+       if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
                return false;
 
        return true;
@@ -436,7 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
 
        cmd.dig1_dpia_control.dpia_control = *dpia_control;
 
-       dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
index d849b1eaa4a5c3dcf11c32e0e4ea8fda1e6e8ee0..03248422d6ffde2d6923fb33185bf8dd12607787 100644
@@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
        cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
        cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
 
-       return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+       return dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
 }
 
 static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
@@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
                panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
        cmd.panel_cntl.data.bl_pwm_ref_div2 =
                panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
-       if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+       if (!dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
                return 0;
 
        panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
index d5c177346a3bfb1533c7c1d052bebe903da97fe8..b134ab05aa71267d406cc2292ba7bbfd7474b1ea 100644
@@ -10,8 +10,7 @@
 #
 # Makefile for dcn314.
 
-DCN314 = dcn314_init.o \
-               dcn314_dio_stream_encoder.o dcn314_dccg.o
+DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o
 
 AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
 
index 905b74b5309201e5bf34c7d280e6afc83413167f..5314770fff1c0608bcf75c056738e6b816929509 100644
@@ -10,7 +10,7 @@
 #
 # Makefile for dcn32.
 
-DCN32 = dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \
+DCN32 = dcn32_hubbub.o dcn32_dccg.o \
                dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
                dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
                dcn32_hpo_dp_link_encoder.o
index e8159a459bcef2fea898577219452c6ad82e2a94..87760600e154dad46e911e28f0b2937e6e012602 100644
@@ -28,6 +28,7 @@
 #include "dcn20/dcn20_resource.h"
 #include "dml/dcn32/display_mode_vba_util_32.h"
 #include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
 
 static bool is_dual_plane(enum surface_pixel_format format)
 {
@@ -190,7 +191,7 @@ bool dcn32_subvp_in_use(struct dc *dc,
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-               if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
+               if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
                        return true;
        }
        return false;
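Here and in the hunks that follow, direct reads of pipe->stream->mall_stream_config.type give way to the dc_state_get_pipe_subvp_type()/dc_state_get_stream_subvp_type() accessors, so SubVP (MALL) classification belongs to the dc_state under validation rather than to the stream object shared across states; it also lets call sites drop their explicit pipe->stream checks, as in the hunk above. A hedged sketch of what such an accessor reduces to, with the state-private lookup left as an assumption:

enum mall_stream_type pipe_subvp_type_sketch(const struct dc_state *state,
		const struct pipe_ctx *pipe)
{
	if (!pipe->stream)
		return SUBVP_NONE;	/* a pipe with no stream cannot be SubVP */
	/* per-stream SubVP config is assumed to live in state-private data */
	return stream_subvp_type_sketch(state, pipe->stream);	/* hypothetical */
}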
@@ -264,18 +265,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
 
        // Do not override if a stream has multiple planes
        for (i = 0; i < context->stream_count; i++) {
-               if (context->stream_status[i].plane_count > 1) {
+               if (context->stream_status[i].plane_count > 1)
                        return;
-               }
-               if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+
+               if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
                        stream_count++;
-               }
        }
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-               if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+               if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
                        if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) {
 
                                if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
@@ -290,7 +290,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-                       if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+                       if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
                                if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
                                        if (pipe_segments[i] > 4)
                                                pipe_segments[i] = 4;
@@ -337,14 +337,14 @@ void dcn32_determine_det_override(struct dc *dc,
 
        for (i = 0; i < context->stream_count; i++) {
                /* Don't count SubVP streams for DET allocation */
-               if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
+               if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
                        stream_count++;
        }
 
        if (stream_count > 0) {
                stream_segments = 18 / stream_count;
                for (i = 0; i < context->stream_count; i++) {
-                       if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+                       if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM)
                                continue;
 
                        if (context->stream_status[i].plane_count > 0)
@@ -430,71 +430,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
                dcn32_determine_det_override(dc, context, pipes);
 }
 
-/**
- * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases
- *
- * This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
- * there are situations where a shallow copy of the dc->current_state is created for the
- * validation. In this case we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
- * fast validation). If we don't restore the subvp config in cases of fast validation +
- * shallow copy of the dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- *
- * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
- *       validation. We don't expect this to happen in fast_validation=1 cases.
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @temp_config: struct used to cache the existing MALL state
- *
- * Return: void
- */
-void dcn32_save_mall_state(struct dc *dc,
-               struct dc_state *context,
-               struct mall_temp_config *temp_config)
-{
-       uint32_t i;
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe->stream)
-                       temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
-
-               if (pipe->plane_state)
-                       temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
-       }
-}
-
-/**
- * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
- *
- * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed, restore MALL state into here
- * @temp_config: struct that has the cached MALL state
- *
- * Return: void
- */
-void dcn32_restore_mall_state(struct dc *dc,
-               struct dc_state *context,
-               struct mall_temp_config *temp_config)
-{
-       uint32_t i;
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe->stream)
-                       pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
-
-               if (pipe->plane_state)
-                       pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
-       }
-}
-
 #define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW)
 /*
  * Scaling factor for v_blank stretch calculations considering timing in
@@ -589,13 +524,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
  *
  * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
  */
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
 {
        int refresh_rate = 0;
        const int minimum_refreshrate_supported = 120;
        struct dc_stream_state *fpo_candidate_stream = NULL;
        bool is_fpo_vactive = false;
        uint32_t fpo_vactive_margin_us = 0;
+       struct dc_stream_status *fpo_stream_status = NULL;
 
        if (context == NULL)
                return NULL;
@@ -618,16 +554,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
                DC_FP_START();
                dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
                DC_FP_END();
-
+               if (fpo_candidate_stream)
+                       fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
                DC_FP_START();
                is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
                DC_FP_END();
                if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
                        return NULL;
-       } else
+       } else {
                fpo_candidate_stream = context->streams[0];
+               if (fpo_candidate_stream)
+                       fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
+       }
 
-       if (!fpo_candidate_stream)
+       /* In DCN32/321, FPO uses per-pipe P-State force.
+        * If there are no planes, HUBP is power gated and
+        * therefore programming UCLK_PSTATE_FORCE does
+        * nothing (P-State will always be asserted naturally
+        * on a pipe that has HUBP power gated). Therefore we
+        * only want to enable FPO if the FPO pipe has both
+        * a stream and a plane.
+        */
+       if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0)
                return NULL;
 
        if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
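Per the comment added above, FPO on DCN32/321 forces P-State per pipe through HUBP, so a candidate stream without a plane would make the force a no-op. The new guard can be read as the predicate below; dc_state_get_stream_status() is the accessor used in the hunk, while the wrapper itself is only illustrative:

static bool fpo_candidate_usable_sketch(struct dc_state *context,
		struct dc_stream_state *stream)
{
	struct dc_stream_status *status;

	if (!stream)
		return false;
	status = dc_state_get_stream_status(context, stream);
	/* HUBP is power gated without planes, so forcing P-State does nothing */
	return status && status->plane_count > 0;
}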
@@ -716,10 +664,11 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+               enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
                if (resource_is_pipe_type(pipe, OPP_HEAD) &&
                                resource_is_pipe_type(pipe, DPP_PIPE)) {
-                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+                       if (pipe_mall_type == SUBVP_MAIN) {
                                subvp_count++;
 
                                subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
@@ -728,7 +677,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
                                refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
                                refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
                        }
-                       if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                       if (pipe_mall_type == SUBVP_NONE) {
                                non_subvp_pipes++;
                                drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
                                if (pipe->stream->ignore_msa_timing_param &&
@@ -776,10 +725,11 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+               enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
                if (resource_is_pipe_type(pipe, OPP_HEAD) &&
                                resource_is_pipe_type(pipe, DPP_PIPE)) {
-                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+                       if (pipe_mall_type == SUBVP_MAIN) {
                                subvp_count++;
 
                                subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
@@ -788,7 +738,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
                                refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
                                refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
                        }
-                       if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                       if (pipe_mall_type == SUBVP_NONE) {
                                non_subvp_pipes++;
                                vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
                                if (pipe->stream->ignore_msa_timing_param &&
index fa7ec82ae5f58bc2d5ff2476e3c305467073c131..0e317e0c36a083cb492fcbbf803ce2bba45a29bf 100644
@@ -10,7 +10,7 @@
 #
 # Makefile for DCN35.
 
-DCN35 = dcn35_init.o dcn35_dio_stream_encoder.o \
+DCN35 = dcn35_dio_stream_encoder.o \
        dcn35_dio_link_encoder.o dcn35_dccg.o \
        dcn35_hubp.o dcn35_hubbub.o \
        dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
index f91e0889527555b7afe4f4b4c60d285a23cdf3e8..da94e5309fbaf0f8e06a4a1aad4ce431a8d9f2cc 100644
@@ -256,6 +256,10 @@ void dcn35_link_encoder_construct(
                enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
                enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
                enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
+               if (bp_cap_info.DP_IS_USB_C) {
+                       /* BIOS has not switched to CONNECTOR_ID_USBC = 24 yet */
+                       enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+               }
 
        } else {
                DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
@@ -264,4 +268,5 @@ void dcn35_link_encoder_construct(
        }
        if (enc10->base.ctx->dc->debug.hdmi20_disable)
                enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+
 }
index ec77b2b41ba3679164047175981800ee42e89c0e..38ab9ad60ef8bc3734411c89a2f5e42ca41b3cc2 100644
@@ -33,6 +33,7 @@
 
 #include "link.h"
 #include "dcn20_fpu.h"
+#include "dc_state_priv.h"
 
 #define DC_LOGGER \
        dc->ctx->logger
@@ -440,7 +441,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
        .use_urgent_burst_bw = 0
 };
 
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
+       .clock_limits = {
+               {
+                       .state = 0,
+                       .dcfclk_mhz = 560.0,
+                       .fabricclk_mhz = 560.0,
+                       .dispclk_mhz = 513.0,
+                       .dppclk_mhz = 513.0,
+                       .phyclk_mhz = 540.0,
+                       .socclk_mhz = 560.0,
+                       .dscclk_mhz = 171.0,
+                       .dram_speed_mts = 1069.0,
+               },
+               {
+                       .state = 1,
+                       .dcfclk_mhz = 694.0,
+                       .fabricclk_mhz = 694.0,
+                       .dispclk_mhz = 642.0,
+                       .dppclk_mhz = 642.0,
+                       .phyclk_mhz = 600.0,
+                       .socclk_mhz = 694.0,
+                       .dscclk_mhz = 214.0,
+                       .dram_speed_mts = 1324.0,
+               },
+               {
+                       .state = 2,
+                       .dcfclk_mhz = 875.0,
+                       .fabricclk_mhz = 875.0,
+                       .dispclk_mhz = 734.0,
+                       .dppclk_mhz = 734.0,
+                       .phyclk_mhz = 810.0,
+                       .socclk_mhz = 875.0,
+                       .dscclk_mhz = 245.0,
+                       .dram_speed_mts = 1670.0,
+               },
+               {
+                       .state = 3,
+                       .dcfclk_mhz = 1000.0,
+                       .fabricclk_mhz = 1000.0,
+                       .dispclk_mhz = 1100.0,
+                       .dppclk_mhz = 1100.0,
+                       .phyclk_mhz = 810.0,
+                       .socclk_mhz = 1000.0,
+                       .dscclk_mhz = 367.0,
+                       .dram_speed_mts = 2000.0,
+               },
+               {
+                       .state = 4,
+                       .dcfclk_mhz = 1200.0,
+                       .fabricclk_mhz = 1200.0,
+                       .dispclk_mhz = 1284.0,
+                       .dppclk_mhz = 1284.0,
+                       .phyclk_mhz = 810.0,
+                       .socclk_mhz = 1200.0,
+                       .dscclk_mhz = 428.0,
+                       .dram_speed_mts = 2000.0,
+               },
+               {
+                       .state = 5,
+                       .dcfclk_mhz = 1200.0,
+                       .fabricclk_mhz = 1200.0,
+                       .dispclk_mhz = 1284.0,
+                       .dppclk_mhz = 1284.0,
+                       .phyclk_mhz = 810.0,
+                       .socclk_mhz = 1200.0,
+                       .dscclk_mhz = 428.0,
+                       .dram_speed_mts = 2000.0,
+               },
+       },
+
+       .num_states = 5,
+       .sr_exit_time_us = 1.9,
+       .sr_enter_plus_exit_time_us = 4.4,
+       .urgent_latency_us = 3.0,
+       .urgent_latency_pixel_data_only_us = 4.0,
+       .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+       .urgent_latency_vm_data_only_us = 4.0,
+       .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+       .max_avg_sdp_bw_use_normal_percent = 40.0,
+       .max_avg_dram_bw_use_normal_percent = 40.0,
+       .writeback_latency_us = 12.0,
+       .ideal_dram_bw_after_urgent_percent = 40.0,
+       .max_request_size_bytes = 256,
+       .dram_channel_width_bytes = 16,
+       .fabric_datapath_to_dcn_data_return_bytes = 64,
+       .dcn_downspread_percent = 0.5,
+       .downspread_percent = 0.5,
+       .dram_page_open_time_ns = 50.0,
+       .dram_rw_turnaround_time_ns = 17.5,
+       .dram_return_buffer_per_channel_bytes = 8192,
+       .round_trip_ping_latency_dcfclk_cycles = 131,
+       .urgent_out_of_order_return_per_channel_bytes = 4096,
+       .channel_interleave_bytes = 256,
+       .num_banks = 8,
+       .num_chans = 16,
+       .vmm_page_size_bytes = 4096,
+       .dram_clock_change_latency_us = 45.0,
+       .writeback_dram_clock_change_latency_us = 23.0,
+       .return_bus_width_bytes = 64,
+       .dispclk_dppclk_vco_speed_mhz = 3850,
+       .xfc_bus_transport_time_us = 20,
+       .xfc_xbuf_latency_tolerance_us = 50,
+       .use_urgent_burst_bw = 0,
+};
 
 struct _vcs_dpi_ip_params_st dcn2_1_ip = {
        .odm_capable = 1,
@@ -1074,7 +1183,7 @@ void dcn20_calculate_dlg_params(struct dc *dc,
                pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
                pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 
-               if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+               if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
                        // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
                        context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
                        context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1424,7 +1533,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
                 */
                if (res_ctx->pipe_ctx[i].plane_state &&
                                (res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
-                                res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM))
+                               dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM))
                        pipes[pipe_cnt].pipe.src.num_cursors = 0;
                else
                        pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
index de209ca0cf8cc5bace9d4e428970bb50d13ad291..9f37f717a1f86f88c5fa41bc30f477406d70f3b8 100644
@@ -32,6 +32,7 @@
 #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
 #include "dcn30/dcn30_resource.h"
 #include "link.h"
+#include "dc_state_priv.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -341,7 +342,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
                if (!pipe->stream)
                        continue;
 
-               if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+               if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
                        pipes[pipe_idx].pipe.dest.vstartup_start =
                                get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
                        pipes[pipe_idx].pipe.dest.vupdate_offset =
@@ -624,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
                if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
                                !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
                                (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
-                               pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+                               dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
                                (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
                                !pipe->plane_state->address.tmz_surface &&
                                (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
@@ -682,7 +683,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
 
                // Find the minimum pipe split count for non SubVP pipes
                if (resource_is_pipe_type(pipe, OPP_HEAD) &&
-                   pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                       dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) {
                        split_cnt = 0;
                        while (pipe) {
                                split_cnt++;
@@ -735,8 +736,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
                 * and also to store the two main SubVP pipe pointers in subvp_pipes[2].
                 */
                if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
-                   pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-                       phantom = pipe->stream->mall_stream_config.paired_stream;
+                       dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+                       phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
                        microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
                                        phantom->timing.v_addressable;
 
@@ -804,6 +805,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
        int16_t stretched_drr_us = 0;
        int16_t drr_stretched_vblank_us = 0;
        int16_t max_vblank_mallregion = 0;
+       struct dc_stream_state *phantom_stream;
+       bool subvp_found = false;
+       bool drr_found = false;
 
        // Find SubVP pipe
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -816,8 +820,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
                        continue;
 
                // Find the SubVP pipe
-               if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+               if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+                       subvp_found = true;
                        break;
+               }
        }
 
        // Find the DRR pipe
@@ -825,32 +831,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
                drr_pipe = &context->res_ctx.pipe_ctx[i];
 
                // We check for master pipe only
-               if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
-                               !resource_is_pipe_type(pipe, DPP_PIPE))
+               if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
+                               !resource_is_pipe_type(drr_pipe, DPP_PIPE))
                        continue;
 
-               if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
-                               (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed))
+               if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
+                               (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) {
+                       drr_found = true;
                        break;
+               }
        }
 
-       main_timing = &pipe->stream->timing;
-       phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
-       drr_timing = &drr_pipe->stream->timing;
-       prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
-                       (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
-                       dc->caps.subvp_prefetch_end_to_mall_start_us;
-       subvp_active_us = main_timing->v_addressable * main_timing->h_total /
-                       (double)(main_timing->pix_clk_100hz * 100) * 1000000;
-       drr_frame_us = drr_timing->v_total * drr_timing->h_total /
-                       (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
-       // P-State allow width and FW delays already included phantom_timing->v_addressable
-       mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
-                       (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
-       stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
-       drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
-                       (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
-       max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+       if (subvp_found && drr_found) {
+               phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
+               main_timing = &pipe->stream->timing;
+               phantom_timing = &phantom_stream->timing;
+               drr_timing = &drr_pipe->stream->timing;
+               prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
+                               (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
+                               dc->caps.subvp_prefetch_end_to_mall_start_us;
+               subvp_active_us = main_timing->v_addressable * main_timing->h_total /
+                               (double)(main_timing->pix_clk_100hz * 100) * 1000000;
+               drr_frame_us = drr_timing->v_total * drr_timing->h_total /
+                               (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
+               // P-State allow width and FW delays are already included in phantom_timing->v_addressable
+               mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
+                               (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
+               stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
+               drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
+                               (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
+               max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+       }
 
        /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
         * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
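The rework above fixes two problems in this function: the DRR search loop filtered on pipe instead of drr_pipe, and the timing math ran even when no SubVP main pipe or DRR pipe had been found, dereferencing pipes that never matched. Distilled to the found-flag pattern it introduces (an illustrative fragment with hypothetical helpers, not the kernel code):

bool subvp_found = find_subvp_main_pipe_sketch(context, &pipe);	/* hypothetical */
bool drr_found = find_drr_pipe_sketch(context, &drr_pipe);	/* hypothetical */

if (subvp_found && drr_found) {
	/* only here is it safe to read pipe->stream and drr_pipe->stream */
	compute_stretch_budget_sketch(pipe, drr_pipe);	/* hypothetical */
}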
@@ -895,6 +906,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
        struct dc_crtc_timing *main_timing = NULL;
        struct dc_crtc_timing *phantom_timing = NULL;
        struct dc_crtc_timing *vblank_timing = NULL;
+       struct dc_stream_state *phantom_stream;
+       enum mall_stream_type pipe_mall_type;
 
        /* For SubVP + VBLANK/DRR cases, we assume there can only be
         * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -904,6 +917,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
         */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                pipe = &context->res_ctx.pipe_ctx[i];
+               pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
                // We check for master pipe, but it shouldn't matter since we only need
                // the pipe for timing info (stream should be same for any pipe splits)
@@ -911,18 +925,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
                                !resource_is_pipe_type(pipe, DPP_PIPE))
                        continue;
 
-               if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+               if (!found && pipe_mall_type == SUBVP_NONE) {
                        // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
                        vblank_index = i;
                        found = true;
                }
 
-               if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+               if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
                        subvp_pipe = pipe;
        }
        if (found) {
+               phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
                main_timing = &subvp_pipe->stream->timing;
-               phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+               phantom_timing = &phantom_stream->timing;
                vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
                // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
                // Also include the prefetch end to mallstart delay time
@@ -977,7 +992,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
                        continue;
 
                if (pipe->plane_state && !pipe->top_pipe &&
-                               pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+                               dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
                        refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
                                pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
                        refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
@@ -1026,23 +1041,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
 
        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+               enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
                if (!pipe->stream)
                        continue;
 
                if (pipe->plane_state && !pipe->top_pipe) {
-                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+                       if (pipe_mall_type == SUBVP_MAIN)
                                subvp_count++;
-                       if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                       if (pipe_mall_type == SUBVP_NONE)
                                non_subvp_pipes++;
-                       }
                }
 
                // Count how many planes that aren't SubVP/phantom are capable of VACTIVE
                // switching (SubVP + VACTIVE unsupported). In situations where we force
                // SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
                if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
-                   pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                               pipe_mall_type == SUBVP_NONE) {
                        vactive_count++;
                }
                pipe_idx++;
@@ -1078,7 +1093,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context)
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
                if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
-                               pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+                               dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
                        pipe_ctx->subvp_index = index++;
                } else {
                        pipe_ctx->subvp_index = 0;
@@ -1532,7 +1547,8 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
                // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
                // remove phantom pipes and repopulate dml pipes
                if (!found_supported_config) {
-                       dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+                       dc_state_remove_phantom_streams_and_planes(dc, context);
+                       dc_state_release_phantom_streams_and_planes(dc, context);
                        vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
                        *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
 
@@ -1684,7 +1700,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
                pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
                                pipe_idx);
 
-               if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+               if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
                        // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
                        context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
                        context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1716,7 +1732,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
                                context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
                                context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
                        /* SS: all active surfaces stored in MALL */
-                       if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+                       if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
                                context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
 
                                if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
@@ -1930,7 +1946,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
                return false;
 
        // For each full update, remove all existing phantom pipes first
-       dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate);
+       dc_state_remove_phantom_streams_and_planes(dc, context);
+       dc_state_release_phantom_streams_and_planes(dc, context);
 
        dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
 
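The res_pool remove_phantom_pipes() hook is retired in favor of a pair of dc_state helpers, which reads as a detach-then-unref split: dc_state_remove_phantom_streams_and_planes() unhooks phantom streams and planes from the state, and dc_state_release_phantom_streams_and_planes() drops the state's references so they can be freed. The pairing used at both call sites in this file (the comments below are interpretive, not from the patch):

/* detach phantom streams/planes from the validation context... */
dc_state_remove_phantom_streams_and_planes(dc, context);
/* ...then drop the context's references so the objects can be freed */
dc_state_release_phantom_streams_and_planes(dc, context);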
@@ -3448,7 +3465,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-               if (!pipe->stream)
+               /* In DCN32/321, FPO uses per-pipe P-State force.
+                * If there are no planes, HUBP is power gated and
+                * therefore programming UCLK_PSTATE_FORCE does
+                * nothing (P-State will always be asserted naturally
+                * on a pipe that has HUBP power gated). Therefore we
+                * only want to enable FPO if the FPO pipe has both
+                * a stream and a plane.
+                */
+               if (!pipe->stream || !pipe->plane_state)
                        continue;
 
                if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
index 3d12dabd39e47d0d2a3fc918dd1d07dbc3902e5d..475c4ec43c013f481a71ad5668a8aef82ac7ba0a 100644
@@ -166,9 +166,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
        .num_states = 5,
        .sr_exit_time_us = 14.0,
        .sr_enter_plus_exit_time_us = 16.0,
-       .sr_exit_z8_time_us = 525.0,
-       .sr_enter_plus_exit_z8_time_us = 715.0,
-       .fclk_change_latency_us = 20.0,
+       .sr_exit_z8_time_us = 210.0,
+       .sr_enter_plus_exit_z8_time_us = 320.0,
+       .fclk_change_latency_us = 24.0,
        .usr_retraining_latency_us = 2,
        .writeback_latency_us = 12.0,
 
index 180f8a98a361a4d07f01c4a7d207ce3ad3196fdf..9be5ebf3a8c0ba7805b793b108923e057f2fdfe0 100644
@@ -5420,7 +5420,7 @@ static void CalculateOutputLink(
                                        *OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
                                                                                                OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
 
-                                       if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
+                                       if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
                                                *RequiresDSC = true;
                                                LinkDSCEnable = true;
                                                *OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
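The fix above is a pointer-vs-value comparison: OutBpp is an output pointer, so the old `if (OutBpp == 0 && ...)` tested the (always non-NULL) pointer and the DSC fallback could never trigger; the intended test is on the computed value *OutBpp. A self-contained illustration of the bug class, with stubbed helpers:

#include <stdio.h>

static unsigned int try_link_bpp(void) { return 0; }	/* stub: nothing fits */
static void enable_dsc_fallback(void) { puts("retry with DSC"); }	/* stub */

static void compute_out_bpp(unsigned int *OutBpp)
{
	*OutBpp = try_link_bpp();
	if (OutBpp == 0)	/* old check: compares the pointer, never true here */
		enable_dsc_fallback();
	if (*OutBpp == 0)	/* fixed check: compares the computed value */
		enable_dsc_fallback();
}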
@@ -6229,7 +6229,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
                                CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
                                CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                                CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
-                               CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+                               CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                                CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
                                CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                                CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
                                mode_lib->ms.NoOfDPPThisState,
                                mode_lib->ms.dpte_group_bytes,
                                s->HostVMInefficiencyFactor,
-                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
                s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
                                                mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
                                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
                                                mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                                                mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
                                                mode_lib->ms.MetaRowBytes[j][k],
                                                mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
                CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
                CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
                CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-               CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+               CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
                CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
                CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
                UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
                UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
-               UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+               UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
                UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
        CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
        CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
        CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-       CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+       CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
        CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
        CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
        CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                        mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
                        locals->dpte_group_bytes,
                        s->HostVMInefficiencyFactor,
-                       mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                       mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                        mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
        locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                        CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
                        CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                        CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
-                       CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+                       CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                        CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
                        CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                        CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                                                mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
                                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
                                                mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                                                locals->PDEAndMetaPTEBytesFrame[k],
                                                locals->MetaRowByte[k],
                                                locals->PixelPTEBytesPerRow[k],
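Every `* 1024` added in this file converts hostvm_min_page_size_kbytes to bytes at the call site: the DML parameters named HostVMMinPageSize consume bytes, while the SoC bounding-box field stores kilobytes, so the raw field was previously passed with a 1024x unit error. A hedged sketch of centralizing that conversion rather than repeating it (the helper is illustrative):

static inline unsigned int hostvm_min_page_size_bytes(unsigned int kbytes)
{
	/* SoC bounding box stores kbytes; DML page-table math wants bytes */
	return kbytes * 1024;
}

A caller would then pass hostvm_min_page_size_bytes(mode_lib->ms.soc.hostvm_min_page_size_kbytes) wherever a HostVMMinPageSize parameter is filled in.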
@@ -9446,13 +9446,13 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                CalculateWatermarks_params->CompressedBufferSizeInkByte = locals->CompressedBufferSizeInkByte;
 
                // Output
-               CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
-               CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[0];
-               CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
-               CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[]
-               CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[0];
-               CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported
-               CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[0];
+               CalculateWatermarks_params->Watermark = &locals->Watermark; // Watermarks *Watermark
+               CalculateWatermarks_params->DRAMClockChangeSupport = &locals->DRAMClockChangeSupport;
+               CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = locals->MaxActiveDRAMClockChangeLatencySupported; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
+               CalculateWatermarks_params->SubViewportLinesNeededInMALL = locals->SubViewportLinesNeededInMALL; // dml_uint_t SubViewportLinesNeededInMALL[]
+               CalculateWatermarks_params->FCLKChangeSupport = &locals->FCLKChangeSupport;
+               CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &locals->MaxActiveFCLKChangeLatencySupported; // dml_float_t *MaxActiveFCLKChangeLatencySupported
+               CalculateWatermarks_params->USRRetrainingSupport = &locals->USRRetrainingSupport;
 
                CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                        &mode_lib->scratch,
index e85866db80ff7b011880e28d449ce556d06acf5e..7ca7f2a743c25445326c86ff0c36b4c5031e01e5 100644
@@ -38,5 +38,6 @@
 #include "core_types.h"
 #include "dsc.h"
 #include "clk_mgr.h"
+#include "dc_state_priv.h"
 
 #endif //__DML2_DC_TYPES_H__
index 32f8a43af3d68cfb7aa9f73002188a1f0744b2aa..282d70e2b18ab8a2da54346562eb7b79fb451b43 100644
@@ -51,7 +51,7 @@ unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx,
 
                // Find the phantom pipes
                if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
-                               pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+                               ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
                        bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
                        mblk_width = ctx->config.mall_cfg.mblk_width_pixels;
                        mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels;
@@ -253,7 +253,7 @@ static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context
                 *   to combine this with SubVP can cause issues with the scheduling).
                 */
                if (pipe->plane_state && !pipe->top_pipe &&
-                               pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
+                               ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 &&
                                vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
                        while (pipe) {
                                num_pipes++;
@@ -317,7 +317,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st
 
                // Find the minimum pipe split count for non SubVP pipes
                if (pipe->stream && !pipe->top_pipe &&
-                   pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                               ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) {
                        split_cnt = 0;
                        while (pipe) {
                                split_cnt++;
@@ -372,8 +372,8 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
                 * and also to store the two main SubVP pipe pointers in subvp_pipes[2].
                 */
                if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
-                   pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-                       phantom = pipe->stream->mall_stream_config.paired_stream;
+                               ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+                       phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
                        microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
                                        phantom->timing.v_addressable;
 
@@ -435,6 +435,7 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
        struct pipe_ctx *pipe = NULL;
        struct dc_crtc_timing *main_timing = NULL;
        struct dc_crtc_timing *phantom_timing = NULL;
+       struct dc_stream_state *phantom_stream;
        int16_t prefetch_us = 0;
        int16_t mall_region_us = 0;
        int16_t drr_frame_us = 0;       // nominal frame time
@@ -453,12 +454,13 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
                        continue;
 
                // Find the SubVP pipe
-               if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+               if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
                        break;
        }
 
+       phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
        main_timing = &pipe->stream->timing;
-       phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
+       phantom_timing = &phantom_stream->timing;
        prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
                        (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
                        ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us;
@@ -519,6 +521,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
        struct dc_crtc_timing *main_timing = NULL;
        struct dc_crtc_timing *phantom_timing = NULL;
        struct dc_crtc_timing *vblank_timing = NULL;
+       struct dc_stream_state *phantom_stream;
+       enum mall_stream_type pipe_mall_type;
 
        /* For SubVP + VBLANK/DRR cases, we assume there can only be
         * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -528,19 +532,20 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
         */
        for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
                pipe = &context->res_ctx.pipe_ctx[i];
+               pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
 
                // We check for master pipe, but it shouldn't matter since we only need
                // the pipe for timing info (stream should be same for any pipe splits)
                if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
 
-               if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+               if (!found && pipe_mall_type == SUBVP_NONE) {
                        // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
                        vblank_index = i;
                        found = true;
                }
 
-               if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+               if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
                        subvp_pipe = pipe;
        }
        // Use ignore_msa_timing_param flag to identify as DRR
@@ -548,8 +553,9 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
                // SUBVP + DRR case
                schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing);
        } else if (found) {
+               phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream);
                main_timing = &subvp_pipe->stream->timing;
-               phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+               phantom_timing = &phantom_stream->timing;
                vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
                // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
                // Also include the prefetch end to mallstart delay time
@@ -602,19 +608,20 @@ bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc
 
        for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+               enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
 
                if (!pipe->stream)
                        continue;
 
                if (pipe->plane_state && !pipe->top_pipe &&
-                               pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+                               pipe_mall_type == SUBVP_MAIN)
                        subvp_count++;
 
                // Count how many planes that aren't SubVP/phantom are capable of VACTIVE
                // switching (SubVP + VACTIVE unsupported). In situations where we force
                // SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
                if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 &&
-                   pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+                   pipe_mall_type == SUBVP_NONE) {
                        vactive_count++;
                }
                pipe_idx++;
@@ -708,14 +715,10 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
 static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup)
 {
        struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx];
-       struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink);
-
-       phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
-       phantom_stream->dpms_off = true;
-       phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
-       phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
-       ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
-       ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+       struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream(
+                       ctx->config.svp_pstate.callbacks.dc,
+                       state,
+                       ref_pipe->stream);
 
        /* stream has limited viewport and small timing */
        memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -723,7 +726,10 @@ static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, s
        memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
        set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup);
 
-       ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+       ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc,
+                       state,
+                       phantom_stream,
+                       ref_pipe->stream);
        return phantom_stream;
 }
 
@@ -740,7 +746,10 @@ static void enable_phantom_plane(struct dml2_context *ctx,
                if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) {
                        phantom_plane = prev_phantom_plane;
                } else {
-                       phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc);
+                       phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane(
+                                       ctx->config.svp_pstate.callbacks.dc,
+                                       state,
+                                       curr_pipe->plane_state);
                }
 
                memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
@@ -763,9 +772,7 @@ static void enable_phantom_plane(struct dml2_context *ctx,
                phantom_plane->clip_rect.y = 0;
                phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
 
-               phantom_plane->is_phantom = true;
-
-               ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
+               ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
 
                curr_pipe = curr_pipe->bottom_pipe;
                prev_phantom_plane = phantom_plane;
@@ -790,7 +797,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
                // We determine which phantom pipes were added by comparing with
                // the phantom stream.
                if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
-                               pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+                               ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
                        pipe->stream->use_dynamic_meta = false;
                        pipe->plane_state->flip_immediate = false;
                        if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) {
@@ -800,7 +807,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
        }
 }
 
-static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
+static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
 {
        int i, old_plane_count;
        struct dc_stream_status *stream_status = NULL;
@@ -821,9 +828,11 @@ static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_str
        for (i = 0; i < old_plane_count; i++)
                del_planes[i] = stream_status->plane_states[i];
 
-       for (i = 0; i < old_plane_count; i++)
-               if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
+       for (i = 0; i < old_plane_count; i++) {
+               if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
                        return false;
+               ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, context, del_planes[i]);
+       }
 
        return true;
 }
@@ -832,35 +841,21 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
 {
        int i;
        bool removed_pipe = false;
-       struct dc_plane_state *phantom_plane = NULL;
        struct dc_stream_state *phantom_stream = NULL;
 
        for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
                struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
                // build scaling params for phantom pipes
-               if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-                       phantom_plane = pipe->plane_state;
+               if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
                        phantom_stream = pipe->stream;
 
-                       remove_all_planes_for_stream(ctx, pipe->stream, state);
-                       ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream);
-
-                       /* Ref count is incremented on allocation and also when added to the context.
-                        * Therefore we must call release for the the phantom plane and stream once
-                        * they are removed from the ctx to finally decrement the refcount to 0 to free.
-                        */
-                       ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane);
-                       ctx->config.svp_pstate.callbacks.stream_release(phantom_stream);
+                       remove_all_phantom_planes_for_stream(ctx, phantom_stream, state);
+                       ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+                       ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
 
                        removed_pipe = true;
                }
 
-               // Clear all phantom stream info
-               if (pipe->stream) {
-                       pipe->stream->mall_stream_config.type = SUBVP_NONE;
-                       pipe->stream->mall_stream_config.paired_stream = NULL;
-               }
-
                if (pipe->plane_state) {
                        pipe->plane_state->is_phantom = false;
                }
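
The hunks above route every SubVP classification through the new state-scoped callbacks (get_pipe_subvp_type(), get_paired_subvp_stream()) instead of reading stream->mall_stream_config directly. A minimal sketch of the caller-side pattern, with stub types standing in for the real dc structures (names simplified, not the actual dc API):

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct dc_state;
struct pipe_ctx;

struct svp_callbacks {
	/* classification lives in the dc_state, not on the stream */
	enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state,
						     const struct pipe_ctx *pipe);
};

static int pipe_is_phantom(const struct svp_callbacks *cb,
			   const struct dc_state *state,
			   const struct pipe_ctx *pipe)
{
	return cb->get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM;
}
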
index 279e7605a0a2bdda4d33fe7896d7a855718ae69c..64d01a9cd68c859db9bcffbc478ef09090b07fbf 100644 (file)
@@ -626,8 +626,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
                if (is_dp2p0_output_encoder(pipe))
                        out->OutputEncoder[location] = dml_dp2p0;
                break;
-               out->OutputEncoder[location] = dml_edp;
        case SIGNAL_TYPE_EDP:
+               out->OutputEncoder[location] = dml_edp;
                break;
        case SIGNAL_TYPE_HDMI_TYPE_A:
        case SIGNAL_TYPE_DVI_SINGLE_LINK:
@@ -1049,8 +1049,10 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
 
 void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
 {
-       int i = 0, j = 0;
+       int i = 0, j = 0, k = 0;
        int disp_cfg_stream_location, disp_cfg_plane_location;
+       enum mall_stream_type stream_mall_type;
+       struct pipe_ctx *current_pipe_context;
 
        for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
                dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false;
@@ -1070,7 +1072,17 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
        dml2_populate_pipe_to_plane_index_mapping(dml2, context);
 
        for (i = 0; i < context->stream_count; i++) {
+               current_pipe_context = NULL;
+               for (k = 0; k < MAX_PIPES; k++) {
+                       /* find one pipe allocated to this stream for the
+                        * purpose of getting info about the link later */
+                       if (context->streams[i] == context->res_ctx.pipe_ctx[k].stream) {
+                               current_pipe_context = &context->res_ctx.pipe_ctx[k];
+                               break;
+                       }
+               }
                disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg);
+               stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]);
 
                if (disp_cfg_stream_location < 0)
                        disp_cfg_stream_location = dml_dispcfg->num_timings++;
@@ -1078,7 +1090,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
                ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
 
                populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
-               populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+               populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
                switch (context->streams[i]->debug.force_odm_combine_segments) {
                case 2:
                        dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1;
@@ -1115,10 +1127,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
                                populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
                                populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context);
 
-                               if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) {
+                               if (stream_mall_type == SUBVP_MAIN) {
                                        dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
                                        dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize;
-                               } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) {
+                               } else if (stream_mall_type == SUBVP_PHANTOM) {
                                        dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
                                        dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable;
                                        dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required;
@@ -1135,7 +1147,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
 
                                if (j >= 1) {
                                        populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]);
-                                       populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+                                       populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context);
                                        switch (context->streams[i]->debug.force_odm_combine_segments) {
                                        case 2:
                                                dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1;
@@ -1147,9 +1159,9 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
                                                break;
                                        }
 
-                                       if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN)
+                                       if (stream_mall_type == SUBVP_MAIN)
                                                dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
-                                       else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+                                       else if (stream_mall_type == SUBVP_PHANTOM)
                                                dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
 
                                        dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id;
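
Two fixes land in this file: the dml_edp assignment used to sit between an unconditional break and the SIGNAL_TYPE_EDP label, where it could never execute, and the output-config helper now receives a pipe actually allocated to the stream rather than indexing pipe_ctx by stream number. A compilable sketch of the corrected switch shape (stub enums, simplified from the hunk above):

enum out_enc { dml_dp, dml_dp2p0, dml_edp };
enum sig_type { SIGNAL_TYPE_DISPLAY_PORT, SIGNAL_TYPE_EDP };

static enum out_enc pick_encoder(enum sig_type signal, int is_dp2p0)
{
	switch (signal) {
	case SIGNAL_TYPE_DISPLAY_PORT:
		/* pre-fix, the dml_edp assignment sat here, after the
		 * break and before the EDP label: unreachable code */
		return is_dp2p0 ? dml_dp2p0 : dml_dp;
	case SIGNAL_TYPE_EDP:
		return dml_edp;	/* now under its own label, reachable */
	}
	return dml_dp;
}
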
index 814dbdcf9a787afd5bf2b4da10fb448441b40e69..1068b962d1c12bf5145a7c64ae7ddfa991d0b198 100644 (file)
@@ -155,11 +155,19 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e
 
 bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
 {
+       if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+               return false;
+
        /* If this assert is hit then we have a link encoder dynamic management issue */
        ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
 
-       if (pipe_ctx->stream == NULL)
-               return false;
+       /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
+       if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) {
+               return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+                       pipe_ctx->link_res.hpo_dp_link_enc &&
+                       dc_is_dp_signal(pipe_ctx->stream->signal) &&
+                       (pipe_ctx->stream->link->remote_sinks[0]->sink_id == pipe_ctx->stream->sink->sink_id));
+       }
 
        return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
                pipe_ctx->link_res.hpo_dp_link_enc &&
@@ -279,6 +287,7 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str
 void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt)
 {
        unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id;
+       enum mall_stream_type pipe_mall_type;
        bool unbounded_req_enabled = false;
        struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch;
 
@@ -326,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
                 */
                populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
 
-               if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+               pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]);
+               if (pipe_mall_type == SUBVP_PHANTOM) {
                        // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
                        context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0;
                        context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false;
@@ -353,7 +363,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
                        context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) &&
                        context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) {
                        /* SS: all active surfaces stored in MALL */
-                       if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+                       if (pipe_mall_type != SUBVP_PHANTOM) {
                                context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes;
                        } else {
                                /* SUBVP: phantom surfaces only stored in MALL */
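
In is_dp2p0_output_encoder() the NULL checks now run before the diagnostic ASSERT that dereferences pipe_ctx, and an MST hub is counted once by treating only the first remote sink in the topology as an encoder. A standalone sketch of that guard-then-match ordering, using stub types rather than the real dc structures:

#include <stddef.h>

struct dc_sink { int sink_id; };
struct dc_link { struct dc_sink *remote_sinks[2]; };
struct dc_stream { struct dc_link *link; struct dc_sink *sink; };
struct pipe { struct dc_stream *stream; int has_hpo_dp_enc; };

static int is_dp2p0_encoder(const struct pipe *p)
{
	/* guard before any dereference; the old code asserted first */
	if (p == NULL || p->stream == NULL)
		return 0;

	/* MST: only the first remote sink in the topology counts as the
	 * encoder, so a hub is not counted once per stream */
	if (p->stream->link && p->stream->link->remote_sinks[0])
		return p->has_hpo_dp_enc &&
		       p->stream->link->remote_sinks[0]->sink_id ==
		       p->stream->sink->sink_id;

	return p->has_hpo_dp_enc;
}
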
index 9d354fde6908a9f97c7a5574db14353d4dc25b16..26307e599614c6e1212c53184ba02849ae6e1dbb 100644 (file)
@@ -418,7 +418,7 @@ static int find_drr_eligible_stream(struct dc_state *display_state)
        int i;
 
        for (i = 0; i < display_state->stream_count; i++) {
-               if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE
+               if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
                        && display_state->streams[i]->ignore_msa_timing_param) {
                        // Use ignore_msa_timing_param flag to identify as DRR
                        return i;
@@ -634,6 +634,8 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
                dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
                memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
                dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
+               // copy StutterPeriod for deciding zstate use
+               context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
        }
 
        return result;
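
find_drr_eligible_stream() above now classifies streams through dc_state_get_stream_subvp_type(), and the validate path copies the computed StutterPeriod into the VBA struct so the zstate decision sees it. A sketch of the eligibility scan with stub types (the accessor is declared extern here as a stand-in for the real one):

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct dc_stream_state { int ignore_msa_timing_param; };
struct dc_state {
	int stream_count;
	struct dc_stream_state *streams[6];	/* size arbitrary for the sketch */
};

extern enum mall_stream_type
dc_state_get_stream_subvp_type(const struct dc_state *state,
			       const struct dc_stream_state *stream);

static int find_drr_eligible_stream(const struct dc_state *state)
{
	int i;

	for (i = 0; i < state->stream_count; i++) {
		/* ignore_msa_timing_param doubles as the DRR marker */
		if (dc_state_get_stream_subvp_type(state, state->streams[i]) == SUBVP_NONE &&
		    state->streams[i]->ignore_msa_timing_param)
			return i;
	}
	return -1;	/* no non-SubVP DRR stream */
}
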
index 0de6886969c69b3860dda4a60269a9eb922f6cb0..ee0eb184eb6d7ea23bec303133cfaa76dc40854a 100644 (file)
@@ -93,15 +93,34 @@ struct dml2_dc_callbacks {
 struct dml2_dc_svp_callbacks {
        struct dc *dc;
        bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
-       struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data);
-       struct dc_plane_state* (*create_plane)(struct dc *dc);
-       enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-       bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
-       bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
-       enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream);
-       void (*plane_state_release)(struct dc_plane_state *plane_state);
-       void (*stream_release)(struct dc_stream_state *stream);
+       struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_stream_state *main_stream);
+       struct dc_plane_state* (*create_phantom_plane)(struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_plane_state *main_plane);
+       enum dc_status (*add_phantom_stream)(struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_stream_state *phantom_stream,
+                       struct dc_stream_state *main_stream);
+       bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
+       bool (*remove_phantom_plane)(const struct dc *dc,
+                       struct dc_stream_state *stream,
+                       struct dc_plane_state *plane_state,
+                       struct dc_state *context);
+       enum dc_status (*remove_phantom_stream)(struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_stream_state *stream);
+       void (*release_phantom_plane)(const struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_plane_state *plane);
+       void (*release_phantom_stream)(const struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_stream_state *stream);
        void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc);
+       enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx);
+       enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream);
+       struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream);
 };
 
 struct dml2_clks_table_entry {
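
The reworked callback table above is symmetric: each create_phantom_* has an add_phantom_*, and each remove_phantom_* has a release_phantom_*, so DML2 no longer manipulates plane/stream refcounts itself. A sketch of the teardown ordering this implies, mirroring dml2_svp_remove_all_phantom_pipes() with stub types (return types simplified):

struct dc;
struct dc_state;
struct dc_stream_state;

struct phantom_stream_cbs {
	int  (*remove_phantom_stream)(struct dc *dc, struct dc_state *state,
				      struct dc_stream_state *stream);
	void (*release_phantom_stream)(const struct dc *dc,
				       struct dc_state *state,
				       struct dc_stream_state *stream);
};

static void drop_phantom_stream(struct dc *dc, struct dc_state *state,
				struct dc_stream_state *stream,
				const struct phantom_stream_cbs *cb)
{
	cb->remove_phantom_stream(dc, state, stream);	/* detach from state */
	cb->release_phantom_stream(dc, state, stream);	/* then drop the ref */
}
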
index bccd46bd18158cfd1f9750cf02f899dbe4bcae3f..254136f8e3f90784ae33263ff998e70f9fdcfbe8 100644 (file)
@@ -78,7 +78,7 @@ ifdef CONFIG_DRM_AMD_DC_FP
 # DCN
 ###############################################################################
 
-HWSS_DCN10 = dcn10_hwseq.o
+HWSS_DCN10 = dcn10_hwseq.o dcn10_init.o
 
 AMD_DAL_HWSS_DCN10 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn10/,$(HWSS_DCN10))
 
@@ -86,7 +86,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN10)
 
 ###############################################################################
 
-HWSS_DCN20 = dcn20_hwseq.o
+HWSS_DCN20 = dcn20_hwseq.o dcn20_init.o
 
 AMD_DAL_HWSS_DCN20 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn20/,$(HWSS_DCN20))
 
@@ -94,7 +94,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN20)
 
 ###############################################################################
 
-HWSS_DCN201 = dcn201_hwseq.o
+HWSS_DCN201 = dcn201_hwseq.o dcn201_init.o
 
 AMD_DAL_HWSS_DCN201 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn201/,$(HWSS_DCN201))
 
@@ -102,7 +102,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN201)
 
 ###############################################################################
 
-HWSS_DCN21 = dcn21_hwseq.o
+HWSS_DCN21 = dcn21_hwseq.o dcn21_init.o
 
 AMD_DAL_HWSS_DCN21 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn21/,$(HWSS_DCN21))
 
@@ -114,7 +114,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN21)
 
 ###############################################################################
 
-HWSS_DCN30 = dcn30_hwseq.o
+HWSS_DCN30 = dcn30_hwseq.o dcn30_init.o
 
 AMD_DAL_HWSS_DCN30 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn30/,$(HWSS_DCN30))
 
@@ -122,7 +122,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN30)
 
 ###############################################################################
 
-HWSS_DCN301 = dcn301_hwseq.o
+HWSS_DCN301 = dcn301_hwseq.o dcn301_init.o
 
 AMD_DAL_HWSS_DCN301 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn301/,$(HWSS_DCN301))
 
@@ -130,15 +130,17 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN301)
 
 ###############################################################################
 
-HWSS_DCN302 = dcn302_hwseq.o
+HWSS_DCN302 = dcn302_hwseq.o dcn302_init.o
 
 AMD_DAL_HWSS_DCN302 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn302/,$(HWSS_DCN302))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN302)
 
+
+
 ###############################################################################
 
-HWSS_DCN303 = dcn303_hwseq.o
+HWSS_DCN303 = dcn303_hwseq.o dcn303_init.o
 
 AMD_DAL_HWSS_DCN303 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn303/,$(HWSS_DCN303))
 
@@ -146,7 +148,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN303)
 
 ###############################################################################
 
-HWSS_DCN31 = dcn31_hwseq.o
+HWSS_DCN31 = dcn31_hwseq.o dcn31_init.o
 
 AMD_DAL_HWSS_DCN31 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn31/,$(HWSS_DCN31))
 
@@ -154,7 +156,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN31)
 
 ###############################################################################
 
-HWSS_DCN314 = dcn314_hwseq.o
+HWSS_DCN314 = dcn314_hwseq.o dcn314_init.o
 
 AMD_DAL_HWSS_DCN314 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn314/,$(HWSS_DCN314))
 
@@ -162,7 +164,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN314)
 
 ###############################################################################
 
-HWSS_DCN32 = dcn32_hwseq.o
+HWSS_DCN32 = dcn32_hwseq.o dcn32_init.o
 
 AMD_DAL_HWSS_DCN32 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn32/,$(HWSS_DCN32))
 
@@ -170,7 +172,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN32)
 
 ###############################################################################
 
-HWSS_DCN35 = dcn35_hwseq.o
+HWSS_DCN35 = dcn35_hwseq.o dcn35_init.o
 
 AMD_DAL_HWSS_DCN35 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn35/,$(HWSS_DCN35))
 
@@ -180,4 +182,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
 
 ###############################################################################
 
-endif
\ No newline at end of file
+endif
index c73fe5e9b36107b8e54cc13f08125404340a2374..5660f15da291e9de58637c115e315b07f1cee7a3 100644 (file)
@@ -55,6 +55,7 @@
 #include "audio.h"
 #include "reg_helper.h"
 #include "panel_cntl.h"
+#include "dc_state_priv.h"
 #include "dpcd_defs.h"
 /* include DCE11 register header files */
 #include "dce/dce_11_0_d.h"
@@ -1353,7 +1354,7 @@ static void build_audio_output(
        if (state->clk_mgr &&
                (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
                        pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
-               audio_output->pll_info.dp_dto_source_clock_in_khz =
+               audio_output->pll_info.audio_dto_source_clock_in_khz =
                                state->clk_mgr->funcs->get_dp_ref_clk_frequency(
                                                state->clk_mgr);
        }
@@ -1596,7 +1597,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
         * is constructed with the same sink). Make sure not to override
         * and link programming on the main.
         */
-       if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+       if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
                pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
                pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false;
        }
@@ -1684,7 +1685,7 @@ static void disable_vga_and_power_gate_all_controllers(
                                true);
 
                dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i;
-               dc->hwss.disable_plane(dc,
+               dc->hwss.disable_plane(dc, dc->current_state,
                        &dc->current_state->res_ctx.pipe_ctx[i]);
        }
 }
@@ -2124,7 +2125,8 @@ static void dce110_reset_hw_ctx_wrap(
                                BREAK_TO_DEBUGGER();
                        }
                        pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
-                       pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+                       if (dc_is_hdmi_tmds_signal(pipe_ctx_old->stream->signal))
+                               pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
                        pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
                                        pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
 
@@ -2133,7 +2135,7 @@ static void dce110_reset_hw_ctx_wrap(
                                                                                old_clk))
                                old_clk->funcs->cs_power_down(old_clk);
 
-                       dc->hwss.disable_plane(dc, pipe_ctx_old);
+                       dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx_old);
 
                        pipe_ctx_old->stream = NULL;
                }
@@ -2497,6 +2499,7 @@ static bool wait_for_reset_trigger_to_occur(
 /* Enable timing synchronization for a group of Timing Generators. */
 static void dce110_enable_timing_synchronization(
                struct dc *dc,
+               struct dc_state *state,
                int group_index,
                int group_size,
                struct pipe_ctx *grouped_pipes[])
@@ -2590,6 +2593,7 @@ static void init_hw(struct dc *dc)
        struct dmcu *dmcu;
        struct dce_hwseq *hws = dc->hwseq;
        uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+       uint32_t user_level = MAX_BACKLIGHT_LEVEL;
 
        bp = dc->ctx->dc_bios;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2639,13 +2643,15 @@ static void init_hw(struct dc *dc)
        for (i = 0; i < dc->link_count; i++) {
                struct dc_link *link = dc->links[i];
 
-               if (link->panel_cntl)
+               if (link->panel_cntl) {
                        backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+                       user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+               }
        }
 
        abm = dc->res_pool->abm;
        if (abm != NULL)
-               abm->funcs->abm_init(abm, backlight);
+               abm->funcs->abm_init(abm, backlight, user_level);
 
        dmcu = dc->res_pool->dmcu;
        if (dmcu != NULL && abm != NULL)
@@ -2842,7 +2848,7 @@ static void dce110_post_unlock_program_front_end(
 {
 }
 
-static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
 {
        struct dce_hwseq *hws = dc->hwseq;
        int fe_idx = pipe_ctx->plane_res.mi ?
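
Across this dce110 section the hw-sequencer entry points (disable_plane, power_down_fe, enable_timing_synchronization) gain a struct dc_state parameter, ABM init takes the stored user backlight level, and the OTG symclk refcount is only zeroed for HDMI TMDS signals. A sketch of the signature-change pattern (stub types; the old prototype shown for contrast):

struct dc;
struct dc_state;
struct pipe_ctx;

struct hw_sequencer_funcs {
	/* old: void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); */
	void (*disable_plane)(struct dc *dc, struct dc_state *state,
			      struct pipe_ctx *pipe_ctx);
};

/* call sites now forward the state they are programming, e.g.
 * dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx), so the
 * implementation can classify the pipe against that exact state */
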
index cdb903116eb7ca54e91746324f47ffe18abe70db..6dd479e8a348502c9b285a38f16650fb7cb4f95e 100644 (file)
@@ -56,6 +56,7 @@
 #include "dc_trace.h"
 #include "dce/dmub_outbox.h"
 #include "link.h"
+#include "dc_state_priv.h"
 
 #define DC_LOGGER \
        dc_logger
@@ -115,7 +116,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
                    !pipe_ctx->stream ||
                    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
                    !tg->funcs->is_tg_enabled(tg) ||
-                       pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+                       dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
                        continue;
 
                if (lock)
@@ -1057,7 +1058,8 @@ static void dcn10_reset_back_end_for_pipe(
                if (pipe_ctx->stream_res.tg->funcs->set_drr)
                        pipe_ctx->stream_res.tg->funcs->set_drr(
                                        pipe_ctx->stream_res.tg, NULL);
-               pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+               if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+                       pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
        }
 
        for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1180,7 +1182,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
 }
 
 /* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+               struct dc_state *state,
+               struct pipe_ctx *pipe_ctx)
 {
        struct dce_hwseq *hws = dc->hwseq;
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1200,7 +1204,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
        mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
        // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
        // so don't wait for MPCC_IDLE in the programming sequence
-       if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
+       if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
                opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
 
        dc->optimized_required = true;
@@ -1290,7 +1294,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
        pipe_ctx->plane_state = NULL;
 }
 
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
 {
        struct dce_hwseq *hws = dc->hwseq;
        DC_LOGGER_INIT(dc->ctx->logger);
@@ -1416,12 +1420,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
                dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
                pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
 
-               hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+               hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
 
                if (tg->funcs->is_tg_enabled(tg))
                        tg->funcs->unlock(tg);
 
-               dc->hwss.disable_plane(dc, pipe_ctx);
+               dc->hwss.disable_plane(dc, context, pipe_ctx);
 
                pipe_ctx->stream_res.tg = NULL;
                pipe_ctx->plane_res.hubp = NULL;
@@ -1486,6 +1490,7 @@ void dcn10_init_hw(struct dc *dc)
        struct dc_bios *dcb = dc->ctx->dc_bios;
        struct resource_pool *res_pool = dc->res_pool;
        uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+       uint32_t user_level = MAX_BACKLIGHT_LEVEL;
        bool   is_optimized_init_done = false;
 
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -1583,12 +1588,14 @@ void dcn10_init_hw(struct dc *dc)
                for (i = 0; i < dc->link_count; i++) {
                        struct dc_link *link = dc->links[i];
 
-                       if (link->panel_cntl)
+                       if (link->panel_cntl) {
                                backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+                               user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+                       }
                }
 
                if (abm != NULL)
-                       abm->funcs->abm_init(abm, backlight);
+                       abm->funcs->abm_init(abm, backlight, user_level);
 
                if (dmcu != NULL && !dmcu->auto_load_dmcu)
                        dmcu->funcs->dmcu_init(dmcu);
@@ -2262,6 +2269,7 @@ void dcn10_enable_vblanks_synchronization(
 
 void dcn10_enable_timing_synchronization(
        struct dc *dc,
+       struct dc_state *state,
        int group_index,
        int group_size,
        struct pipe_ctx *grouped_pipes[])
@@ -2276,7 +2284,7 @@ void dcn10_enable_timing_synchronization(
        DC_SYNC_INFO("Setting up OTG reset trigger\n");
 
        for (i = 1; i < group_size; i++) {
-               if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+               if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
                        continue;
 
                opp = grouped_pipes[i]->stream_res.opp;
@@ -2296,14 +2304,14 @@ void dcn10_enable_timing_synchronization(
                if (grouped_pipes[i]->stream == NULL)
                        continue;
 
-               if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+               if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
                        continue;
 
                grouped_pipes[i]->stream->vblank_synchronized = false;
        }
 
        for (i = 1; i < group_size; i++) {
-               if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+               if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
                        continue;
 
                grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
@@ -2317,11 +2325,11 @@ void dcn10_enable_timing_synchronization(
         * synchronized. Look at last pipe programmed to reset.
         */
 
-       if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+       if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
                wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
 
        for (i = 1; i < group_size; i++) {
-               if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+               if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
                        continue;
 
                grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
@@ -2329,7 +2337,7 @@ void dcn10_enable_timing_synchronization(
        }
 
        for (i = 1; i < group_size; i++) {
-               if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+               if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
                        continue;
 
                opp = grouped_pipes[i]->stream_res.opp;
@@ -3021,7 +3029,7 @@ void dcn10_post_unlock_program_front_end(
 
        for (i = 0; i < dc->res_pool->pipe_count; i++)
                if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
-                       dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+                       dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
 
        for (i = 0; i < dc->res_pool->pipe_count; i++)
                if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
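
The dcn10 hunks retire plane_state->is_phantom in favor of dc_state_get_pipe_subvp_type(), so phantom status is derived from the state being programmed and cannot go stale on the plane. A sketch of the check as used in the disconnect path (the accessor is assumed to match the one in the hunks; stub types otherwise):

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct dc_state;
struct pipe_ctx;

extern enum mall_stream_type
dc_state_get_pipe_subvp_type(const struct dc_state *state,
			     const struct pipe_ctx *pipe_ctx);

static int skip_mpcc_idle_wait(const struct dc_state *state,
			       const struct pipe_ctx *pipe_ctx)
{
	/* phantom pipes keep their OTG disabled, so MPCC_STATUS never
	 * reports idle; the disconnect sequence must not wait on it */
	return dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
}
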
index ef6d56da417cdffb6b15de4529972d8ef3795bc4..bc5dd68a2408801d0e9f46373e523e1dab01c461 100644 (file)
@@ -75,7 +75,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn10_reset_hw_ctx_wrap(
                struct dc *dc,
                struct dc_state *context);
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
 void dcn10_lock_all_pipes(
                struct dc *dc,
                struct dc_state *context,
@@ -108,13 +108,16 @@ void dcn10_power_down_on_boot(struct dc *dc);
 enum dc_status dce110_apply_ctx_to_hw(
                struct dc *dc,
                struct dc_state *context);
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+               struct dc_state *state,
+               struct pipe_ctx *pipe_ctx);
 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data);
 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx);
 void dce110_power_down(struct dc *dc);
 void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
 void dcn10_enable_timing_synchronization(
                struct dc *dc,
+               struct dc_state *state,
                int group_index,
                int group_size,
                struct pipe_ctx *grouped_pipes[]);
index c3c83178eb1e3c427dc1a8d4beba87f1d4ec8f48..e931342fcf4cf1d4f4b0cf41628cd9f855fa6dac 100644 (file)
@@ -55,6 +55,7 @@
 #include "inc/link_enc_cfg.h"
 #include "link_hwss.h"
 #include "link.h"
+#include "dc_state_priv.h"
 
 #define DC_LOGGER \
        dc_logger
@@ -623,9 +624,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 }
 
 
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
 {
-       bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+       bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
        struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
 
        DC_LOGGER_INIT(dc->ctx->logger);
@@ -847,7 +848,7 @@ enum dc_status dcn20_enable_stream_timing(
        /* TODO enable stream if timing changed */
        /* TODO unblank stream if DP */
 
-       if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+       if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
                if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
                        pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
        }
@@ -1368,8 +1369,14 @@ void dcn20_pipe_control_lock(
        }
 }
 
-static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
+static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+               struct dc_state *new_state,
+               struct pipe_ctx *old_pipe,
+               struct pipe_ctx *new_pipe)
 {
+       bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+       bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
        new_pipe->update_flags.raw = 0;
 
        /* If non-phantom pipe is being transitioned to a phantom pipe,
@@ -1379,8 +1386,8 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
         * be different). The post_unlock sequence will set the correct
         * update flags to enable the phantom pipe.
         */
-       if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
-                       new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
+       if (old_pipe->plane_state && !old_is_phantom &&
+                       new_pipe->plane_state && new_is_phantom) {
                new_pipe->update_flags.bits.disable = 1;
                return;
        }
@@ -1400,6 +1407,10 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
                new_pipe->update_flags.bits.scaler = 1;
                new_pipe->update_flags.bits.viewport = 1;
                new_pipe->update_flags.bits.det_size = 1;
+               if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+                               new_pipe->stream_res.test_pattern_params.width != 0 &&
+                               new_pipe->stream_res.test_pattern_params.height != 0)
+                       new_pipe->update_flags.bits.test_pattern_changed = 1;
                if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
                        new_pipe->update_flags.bits.odm = 1;
                        new_pipe->update_flags.bits.global_sync = 1;
@@ -1412,14 +1423,14 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
         * The remove-add sequence of the phantom pipe always results in the pipe
         * being blanked in enable_stream_timing (DPG).
         */
-       if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+       if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
                new_pipe->update_flags.bits.enable = 1;
 
        /* Phantom pipes are effectively disabled, if the pipe was previously phantom
         * we have to enable
         */
-       if (old_pipe->plane_state && old_pipe->plane_state->is_phantom &&
-                       new_pipe->plane_state && !new_pipe->plane_state->is_phantom)
+       if (old_pipe->plane_state && old_is_phantom &&
+                       new_pipe->plane_state && !new_is_phantom)
                new_pipe->update_flags.bits.enable = 1;
 
        if (old_pipe->plane_state && !new_pipe->plane_state) {
@@ -1556,6 +1567,7 @@ static void dcn20_update_dchubp_dpp(
        struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct dccg *dccg = dc->res_pool->dccg;
        bool viewport_changed = false;
+       enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
 
        if (pipe_ctx->update_flags.bits.dppclk)
                dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1701,7 +1713,7 @@ static void dcn20_update_dchubp_dpp(
                pipe_ctx->update_flags.bits.plane_changed ||
                plane_state->update_flags.bits.addr_update) {
                if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
-                               pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+                               pipe_mall_type == SUBVP_MAIN) {
                        union block_sequence_params params;
 
                        params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
@@ -1715,7 +1727,7 @@ static void dcn20_update_dchubp_dpp(
        if (pipe_ctx->update_flags.bits.enable)
                hubp->funcs->set_blank(hubp, false);
        /* If the stream paired with this plane is phantom, the plane is also phantom */
-       if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM
+       if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM
                        && hubp->funcs->phantom_hubp_post_enable)
                hubp->funcs->phantom_hubp_post_enable(hubp);
 }
@@ -1773,7 +1785,7 @@ static void dcn20_program_pipe(
                                pipe_ctx->pipe_dlg_param.vupdate_offset,
                                pipe_ctx->pipe_dlg_param.vupdate_width);
 
-               if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM)
+               if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
                        pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
 
                pipe_ctx->stream_res.tg->funcs->set_vtg_params(
@@ -1912,7 +1924,7 @@ void dcn20_program_front_end_for_ctx(
 
        /* Set pipe update flags and lock pipes */
        for (i = 0; i < dc->res_pool->pipe_count; i++)
-               dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+               dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
                                &context->res_ctx.pipe_ctx[i]);
 
        /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
@@ -1922,15 +1934,16 @@ void dcn20_program_front_end_for_ctx(
                struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
 
                if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
-                       dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+                               dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
                        struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
 
                        if (tg->funcs->enable_crtc) {
                                if (dc->hwss.blank_phantom) {
                                        int main_pipe_width, main_pipe_height;
+                                       struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream);
 
-                                       main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
-                                       main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
+                                       main_pipe_width = phantom_stream->dst.width;
+                                       main_pipe_height = phantom_stream->dst.height;
                                        dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
                                }
                                tg->funcs->enable_crtc(tg);
@@ -1959,9 +1972,9 @@ void dcn20_program_front_end_for_ctx(
                         * DET allocation.
                         */
                        if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
-                                       (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom)))
+                                       (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM)))
                                hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
-                       hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+                       hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
                        DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
                }
 
@@ -1984,7 +1997,7 @@ void dcn20_program_front_end_for_ctx(
                                         * but the MPO still exists until the double buffered update of the main pipe so we
                                         * will get a frame of underflow if the phantom pipe is programmed here.
                                         */
-                                       if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM)
+                                       if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
                                                dcn20_program_pipe(dc, pipe, context);
                                }
 
@@ -2034,7 +2047,7 @@ void dcn20_post_unlock_program_front_end(
 
        for (i = 0; i < dc->res_pool->pipe_count; i++)
                if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
-                       dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+                       dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
 
        /*
         * If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -2046,7 +2059,7 @@ void dcn20_post_unlock_program_front_end(
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
                // Don't check flip pending on phantom pipes
                if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
-                               pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+                               dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
                        struct hubp *hubp = pipe->plane_res.hubp;
                        int j = 0;
                        for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
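
The hunk boundary cuts off the poll that the phantom check above guards; reconstructed for context (TIMEOUT_FOR_PIPE_ENABLE_US, polling_interval_us and hubp_is_flip_pending all come from the surrounding file, but the loop body here is an assumption):

    /* Sketch of the guarded wait: poll the HUBP until the pending flip
     * clears or the pipe-enable timeout expires. Phantom pipes are
     * skipped because their flips are never consumed by the OS, so the
     * poll could spin until timeout.
     */
    for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us; j++) {
            if (!hubp->funcs->hubp_is_flip_pending(hubp))
                    break;
            udelay(polling_interval_us);
    }
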
@@ -2069,7 +2082,7 @@ void dcn20_post_unlock_program_front_end(
                         * programming sequence).
                         */
                        while (pipe) {
-                               if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+                               if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
                                        /* When turning on the phantom pipe we want to run through the
                                         * entire enable sequence, so apply all the "enable" flags.
                                         */
@@ -2139,7 +2152,7 @@ void dcn20_prepare_bandwidth(
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                // At optimize, don't restore the original watermark value
-               if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+               if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
                        context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
                        break;
                }
@@ -2183,7 +2196,7 @@ void dcn20_optimize_bandwidth(
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                // At optimize, don't restore the original watermark value
-               if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+               if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
                        context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
                        break;
                }
@@ -2217,7 +2230,8 @@ void dcn20_optimize_bandwidth(
                        dc->clk_mgr,
                        context,
                        true);
-       if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+       if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+               !dc->debug.disable_extblankadj) {
                for (i = 0; i < dc->res_pool->pipe_count; ++i) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
@@ -2610,7 +2624,8 @@ static void dcn20_reset_back_end_for_pipe(
                 * the case where the same symclk is shared across multiple otg
                 * instances
                 */
-               link->phy_state.symclk_ref_cnts.otg = 0;
+               if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+                       link->phy_state.symclk_ref_cnts.otg = 0;
                if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
                        link_hwss->disable_link_output(link,
                                        &pipe_ctx->link_res, pipe_ctx->stream->signal);
@@ -2943,7 +2958,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
                dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
                pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
                /* TODO */
-               hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+               hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
        }
 
        /* initialize DWB pointer to MCIF_WB */
@@ -2960,7 +2975,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-               dc->hwss.disable_plane(dc, pipe_ctx);
+               dc->hwss.disable_plane(dc, context, pipe_ctx);
 
                pipe_ctx->stream_res.tg = NULL;
                pipe_ctx->plane_res.hubp = NULL;
index ab02e4e9c8c292fa051b9a4cd2a63f1881767586..b94c85340abff7c02f3ec59025b04c8417d77bd6 100644 (file)
@@ -52,7 +52,7 @@ void dcn20_program_output_csc(struct dc *dc,
 void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
 void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
                struct dc_link_settings *link_settings);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
 void dcn20_disable_pixel_data(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
index d3fe6092f50e8f8a1ff72f59c79f17a2f6167bf6..d5769f38874fd4d1d9bd72d988f9e53bfa966c92 100644 (file)
@@ -320,7 +320,7 @@ void dcn201_init_hw(struct dc *dc)
                res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
                pipe_ctx->stream_res.opp = res_pool->opps[i];
                /* TODO: number of MPCCs != number of OPPs */
-               hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+               hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
        }
 
        /* initialize DWB pointer to MCIF_WB */
@@ -337,7 +337,7 @@ void dcn201_init_hw(struct dc *dc)
        for (i = 0; i < res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-               dc->hwss.disable_plane(dc, pipe_ctx);
+               dc->hwss.disable_plane(dc, context, pipe_ctx);
 
                pipe_ctx->stream_res.tg = NULL;
                pipe_ctx->plane_res.hubp = NULL;
@@ -369,7 +369,9 @@ void dcn201_init_hw(struct dc *dc)
 }
 
 /* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn201_plane_atomic_disconnect(struct dc *dc,
+               struct dc_state *state,
+               struct pipe_ctx *pipe_ctx)
 {
        struct dce_hwseq *hws = dc->hwseq;
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
index 26cd62be64181e643a79e8a2788ce2d8499236c1..6a50a9894be6ae3caff35eb303efc7178f74acd9 100644 (file)
@@ -33,7 +33,7 @@ void dcn201_init_hw(struct dc *dc);
 void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
                struct dc_link_settings *link_settings);
 void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
 void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
 void dcn201_pipe_control_lock(
index 08783ad097d2140f79d69c40a55e5ead7241f5f4..8e88dcaf88f5b2b709a95abf9e0673390e27daa5 100644 (file)
@@ -154,7 +154,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
        cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
        cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -173,7 +173,7 @@ static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm
        cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
        cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
 
-       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
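
The dm_execute_dmub_cmd() to dc_wake_and_execute_dmub_cmd() conversions recur through the rest of this merge. The intent is that, on parts with IPS, the wrapper first brings DMCUB out of its low-power state before queueing the ring-buffer command; the wait-type semantics are otherwise unchanged. A sketch of a converted call site (command setup copied from the dcn31 hunks below):

    union dmub_rb_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
    cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;

    /* Wakes DMCUB (e.g. out of IPS) if needed, then executes as before. */
    dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
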
index fd8a8c10a2019275222bb5e39950262f5ca65c6b..c34c13e1e0a4ea918de9a9e36dbe305ce5224485 100644 (file)
@@ -51,7 +51,7 @@
 #include "dcn20/dcn20_hwseq.h"
 #include "dcn30/dcn30_resource.h"
 #include "link.h"
-
+#include "dc_state_priv.h"
 
 
 
@@ -476,6 +476,7 @@ void dcn30_init_hw(struct dc *dc)
        int i;
        int edp_num;
        uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+       uint32_t user_level = MAX_BACKLIGHT_LEVEL;
 
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
                dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -612,13 +613,15 @@ void dcn30_init_hw(struct dc *dc)
        for (i = 0; i < dc->link_count; i++) {
                struct dc_link *link = dc->links[i];
 
-               if (link->panel_cntl)
+               if (link->panel_cntl) {
                        backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+                       user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+               }
        }
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (abms[i] != NULL)
-                       abms[i]->funcs->abm_init(abms[i], backlight);
+                       abms[i]->funcs->abm_init(abms[i], backlight, user_level);
        }
 
        /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
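
The same two-value handoff repeats in the dcn31, dcn32 and dcn35 init paths below: hw_init() still returns the calculated backlight, while the panel controller's stored registers now also expose the last user-requested level, so re-initializing ABM no longer clobbers the user's brightness. Condensed from the hunks in this series (not a new API):

    if (link->panel_cntl) {
            backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
            user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
    }

    /* later, per ABM instance: */
    abm->funcs->abm_init(abm, backlight, user_level);
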
@@ -754,7 +757,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
                                cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
                                cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
 
-                               dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+                               dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
                                return true;
                        }
@@ -876,7 +879,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
                                        cmd.mall.cursor_height = cursor_attr.height;
                                        cmd.mall.cursor_pitch = cursor_attr.pitch;
 
-                                       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+                                       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
                                        /* Use copied cursor, and it's okay to not switch back */
                                        cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
@@ -892,7 +895,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
                                cmd.mall.tmr_scale = tmr_scale;
                                cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
 
-                               dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+                               dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
                                return true;
                        }
@@ -909,7 +912,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
        cmd.mall.header.payload_bytes =
                sizeof(cmd.mall) - sizeof(cmd.mall.header);
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -966,7 +969,7 @@ void dcn30_hardware_release(struct dc *dc)
                if (!pipe->stream)
                        continue;
 
-               if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+               if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) {
                        subvp_in_use = true;
                        break;
                }
index 5d62805f3bdf6106d26e3c3a5cdf3e3438e8b89e..7423880fabb6e3b0d1a1003bab1fa8ff4898a936 100644 (file)
@@ -113,6 +113,7 @@ void dcn31_init_hw(struct dc *dc)
        struct dc_bios *dcb = dc->ctx->dc_bios;
        struct resource_pool *res_pool = dc->res_pool;
        uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+       uint32_t user_level = MAX_BACKLIGHT_LEVEL;
        int i;
 
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -224,13 +225,15 @@ void dcn31_init_hw(struct dc *dc)
        for (i = 0; i < dc->link_count; i++) {
                struct dc_link *link = dc->links[i];
 
-               if (link->panel_cntl)
+               if (link->panel_cntl) {
                        backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+                       user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+               }
        }
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (abms[i] != NULL)
-                       abms[i]->funcs->abm_init(abms[i], backlight);
+                       abms[i]->funcs->abm_init(abms[i], backlight, user_level);
        }
 
        /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -416,7 +419,7 @@ void dcn31_z10_save_init(struct dc *dc)
        cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
        cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dcn31_z10_restore(const struct dc *dc)
@@ -434,7 +437,7 @@ void dcn31_z10_restore(const struct dc *dc)
        cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
        cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
@@ -524,7 +527,8 @@ static void dcn31_reset_back_end_for_pipe(
        if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
                pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
                                pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
-       pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+       if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
 
        if (pipe_ctx->stream_res.tg->funcs->set_drr)
                pipe_ctx->stream_res.tg->funcs->set_drr(
index 5c323718ec906d68288c89e2c4935f66c06f7e2b..6c9299c7683df19b3c444b865d297182d91ae7b3 100644 (file)
@@ -51,6 +51,7 @@
 #include "dcn32/dcn32_resource.h"
 #include "link.h"
 #include "../dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -277,7 +278,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
                                cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
                                cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
 
-                               dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+                               dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
                                return true;
                        }
@@ -311,7 +312,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
                                cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
                                cmd.cab.cab_alloc_ways = (uint8_t)ways;
 
-                               dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+                               dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
                                return true;
                        }
@@ -327,7 +328,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
        cmd.cab.header.payload_bytes =
                        sizeof(cmd.cab) - sizeof(cmd.cab.header);
 
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+       dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
        return true;
 }
@@ -348,8 +349,7 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-               if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream &&
-                               pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+               if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
                        // There is at least 1 SubVP pipe, so enable SubVP
                        enable_subvp = true;
                        break;
@@ -375,18 +375,20 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
        bool subvp_immediate_flip = false;
        bool subvp_in_use = false;
        struct pipe_ctx *pipe;
+       enum mall_stream_type pipe_mall_type = SUBVP_NONE;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                pipe = &context->res_ctx.pipe_ctx[i];
+               pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
-               if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+               if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) {
                        subvp_in_use = true;
                        break;
                }
        }
 
        if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) {
-               if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN &&
+               if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN &&
                                top_pipe_to_program->plane_state->flip_immediate)
                        subvp_immediate_flip = true;
        }
@@ -398,7 +400,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
                if (!lock) {
                        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                                pipe = &context->res_ctx.pipe_ctx[i];
-                               if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
+                               if (pipe->stream && pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN &&
                                                should_lock_all_pipes)
                                        pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
                        }
@@ -416,14 +418,7 @@ void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params)
 {
        struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc;
        bool lock = params->subvp_pipe_control_lock_fast_params.lock;
-       struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx;
-       bool subvp_immediate_flip = false;
-
-       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) {
-               if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN &&
-                               pipe_ctx->plane_state->flip_immediate)
-                       subvp_immediate_flip = true;
-       }
+       bool subvp_immediate_flip = params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip;
 
        // No need to lock for DRR VSYNC flips -- FW will wait for the DRR pending update to clear.
        if (subvp_immediate_flip) {
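
The fast path no longer re-derives the immediate-flip condition from the pipe; the builder computes it once and stores a plain bool in the params struct (see the subvp_pipe_control_lock_fast_params hunk near the end of this section). A hypothetical builder-side counterpart; the exact assignment site is an assumption:

    /* Decide once when the block sequence is built, consume in the
     * fast-lock step. Field names match the params struct.
     */
    params->subvp_pipe_control_lock_fast_params.dc = dc;
    params->subvp_pipe_control_lock_fast_params.lock = lock;
    params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
            pipe_ctx->stream && pipe_ctx->plane_state &&
            pipe_ctx->plane_state->flip_immediate &&
            dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN;
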
@@ -609,7 +604,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
                struct hubp *hubp = pipe->plane_res.hubp;
 
-               if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+               if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
                    pipe->stream->fpo_in_use)) {
                        if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
                                hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
@@ -624,7 +619,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
                struct hubp *hubp = pipe->plane_res.hubp;
 
-               if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+               if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
                                pipe->stream->fpo_in_use)) {
                        if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
                                hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
@@ -671,8 +666,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
                        if (cursor_size > 16384)
                                cache_cursor = true;
 
-                       if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-                                       hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
+                       if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+                               hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
                        } else {
                                // MALL not supported with Stereo3D
                                hubp->funcs->hubp_update_mall_sel(hubp,
@@ -714,9 +709,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
                         *        see if CURSOR_REQ_MODE will be back to 1 for SubVP
                         *        when it should be 0 for MPO
                         */
-                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+                       if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
                                hubp->funcs->hubp_prepare_subvp_buffering(hubp, true);
-                       }
                }
        }
 }
@@ -759,6 +753,7 @@ void dcn32_init_hw(struct dc *dc)
        int i;
        int edp_num;
        uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+       uint32_t user_level = MAX_BACKLIGHT_LEVEL;
 
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
                dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -913,13 +908,15 @@ void dcn32_init_hw(struct dc *dc)
        for (i = 0; i < dc->link_count; i++) {
                struct dc_link *link = dc->links[i];
 
-               if (link->panel_cntl)
+               if (link->panel_cntl) {
                        backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+                       user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+               }
        }
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (abms[i] != NULL && abms[i]->funcs != NULL)
-                       abms[i]->funcs->abm_init(abms[i], backlight);
+                       abms[i]->funcs->abm_init(abms[i], backlight, user_level);
        }
 
        /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -960,6 +957,12 @@ void dcn32_init_hw(struct dc *dc)
                dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
                dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
                dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+
+               if (dc->ctx->dmub_srv->dmub->fw_version <
+                   DMUB_FW_VERSION(7, 0, 35)) {
+                       dc->debug.force_disable_subvp = true;
+                       dc->debug.disable_fpo_optimizations = true;
+               }
        }
 }
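
This gate pins SubVP and the FPO optimizations off when the DMCUB firmware predates 7.0.35. DMUB_FW_VERSION packs major/minor/revision into one integer so firmware versions compare with a plain '<'; one plausible shape of the macro, for illustration only (the real definition lives in the dmub headers and its field widths may differ):

    #define DMUB_FW_VERSION(major, minor, revision) \
            ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | \
             ((revision) & 0xFFFF))
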
 
@@ -1222,7 +1225,7 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
                        continue;
 
                if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
-                       && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+                       && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) {
                        pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
                        reset_sync_context_for_pipe(dc, context, i);
                        otg_disabled[i] = true;
@@ -1373,8 +1376,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-               if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
-                               pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
+               if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN &&
+                               dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) {
                        if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {
 
                                phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
@@ -1399,21 +1402,19 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
 void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
 {
        phantom_pipe->update_flags.raw = 0;
-       if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-               if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
-                       phantom_pipe->update_flags.bits.enable = 1;
-                       phantom_pipe->update_flags.bits.mpcc = 1;
-                       phantom_pipe->update_flags.bits.dppclk = 1;
-                       phantom_pipe->update_flags.bits.hubp_interdependent = 1;
-                       phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
-                       phantom_pipe->update_flags.bits.gamut_remap = 1;
-                       phantom_pipe->update_flags.bits.scaler = 1;
-                       phantom_pipe->update_flags.bits.viewport = 1;
-                       phantom_pipe->update_flags.bits.det_size = 1;
-                       if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
-                               phantom_pipe->update_flags.bits.odm = 1;
-                               phantom_pipe->update_flags.bits.global_sync = 1;
-                       }
+       if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
+               phantom_pipe->update_flags.bits.enable = 1;
+               phantom_pipe->update_flags.bits.mpcc = 1;
+               phantom_pipe->update_flags.bits.dppclk = 1;
+               phantom_pipe->update_flags.bits.hubp_interdependent = 1;
+               phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+               phantom_pipe->update_flags.bits.gamut_remap = 1;
+               phantom_pipe->update_flags.bits.scaler = 1;
+               phantom_pipe->update_flags.bits.viewport = 1;
+               phantom_pipe->update_flags.bits.det_size = 1;
+               if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
+                       phantom_pipe->update_flags.bits.odm = 1;
+                       phantom_pipe->update_flags.bits.global_sync = 1;
                }
        }
 }
@@ -1485,8 +1486,8 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
                 * pipe, wait for the double buffer update to complete first before we do
                 * ANY phantom pipe programming.
                 */
-               if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
-                               old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+               if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM &&
+                               old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) {
                        old_pipe->stream_res.tg->funcs->wait_for_state(
                                        old_pipe->stream_res.tg,
                                        CRTC_STATE_VBLANK);
@@ -1498,7 +1499,7 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
 
-               if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+               if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) {
                        // If old context or new context has phantom pipes, apply
                        // the phantom timings now. We can't change the phantom
                        // pipe configuration safely without driver acquiring
index 9262d33361821a6ad5797920dff9a8e3e038eada..9c806385ecbdcce6c0d14f949ea41879758969f7 100644 (file)
@@ -56,6 +56,7 @@
 #include "dcn30/dcn30_cm_common.h"
 #include "dcn31/dcn31_hwseq.h"
 #include "dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
 
 #define DC_LOGGER_INIT(logger) \
        struct dal_logger *dc_logger = logger
@@ -133,6 +134,7 @@ void dcn35_init_hw(struct dc *dc)
        struct dc_bios *dcb = dc->ctx->dc_bios;
        struct resource_pool *res_pool = dc->res_pool;
        uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+       uint32_t user_level = MAX_BACKLIGHT_LEVEL;
        int i;
 
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -279,13 +281,15 @@ void dcn35_init_hw(struct dc *dc)
        for (i = 0; i < dc->link_count; i++) {
                struct dc_link *link = dc->links[i];
 
-               if (link->panel_cntl)
+               if (link->panel_cntl) {
                        backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+                       user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+               }
        }
        if (dc->ctx->dmub_srv) {
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (abms[i] != NULL && abms[i]->funcs != NULL)
-                       abms[i]->funcs->abm_init(abms[i], backlight);
+                       abms[i]->funcs->abm_init(abms[i], backlight, user_level);
                }
        }
 
@@ -687,11 +691,7 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
        }
 
        // TODO: review other cases when idle optimization is allowed
-
-       if (!enable)
-               dc_dmub_srv_exit_low_power_state(dc);
-       else
-               dc_dmub_srv_notify_idle(dc, enable);
+       dc_dmub_srv_apply_idle_power_optimizations(dc, enable);
 
        return true;
 }
@@ -701,7 +701,7 @@ void dcn35_z10_restore(const struct dc *dc)
        if (dc->debug.disable_z10)
                return;
 
-       dc_dmub_srv_exit_low_power_state(dc);
+       dc_dmub_srv_apply_idle_power_optimizations(dc, false);
 
        dcn31_z10_restore(dc);
 }
@@ -817,12 +817,12 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
                dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
                pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
 
-               hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+               hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
 
                if (tg->funcs->is_tg_enabled(tg))
                        tg->funcs->unlock(tg);
 
-               dc->hwss.disable_plane(dc, pipe_ctx);
+               dc->hwss.disable_plane(dc, context, pipe_ctx);
 
                pipe_ctx->stream_res.tg = NULL;
                pipe_ctx->plane_res.hubp = NULL;
@@ -949,10 +949,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
        pipe_ctx->plane_state = NULL;
 }
 
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
 {
        struct dce_hwseq *hws = dc->hwseq;
-       bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+       bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
        struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
 
        DC_LOGGER_INIT(dc->ctx->logger);
@@ -1123,21 +1123,28 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
                update_state->pg_res_update[PG_HPO] = true;
 
 }
+
 /**
-        * power down sequence
-        * ONO Region 3, DCPG 25: hpo - SKIPPED
-        * ONO Region 4, DCPG 0: dchubp0, dpp0
-        * ONO Region 6, DCPG 1: dchubp1, dpp1
-        * ONO Region 8, DCPG 2: dchubp2, dpp2
-        * ONO Region 10, DCPG 3: dchubp3, dpp3
-        * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
-        * ONO Region 5, DCPG 16: dsc0
-        * ONO Region 7, DCPG 17: dsc1
-        * ONO Region 9, DCPG 18: dsc2
-        * ONO Region 11, DCPG 19: dsc3
-        * ONO Region 2, DCPG 24: mpc opp optc dwb
-        * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
-*/
+ * dcn35_hw_block_power_down() - power down sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power down:
+ *
+ *     ONO Region 3, DCPG 25: hpo - SKIPPED
+ *     ONO Region 4, DCPG 0: dchubp0, dpp0
+ *     ONO Region 6, DCPG 1: dchubp1, dpp1
+ *     ONO Region 8, DCPG 2: dchubp2, dpp2
+ *     ONO Region 10, DCPG 3: dchubp3, dpp3
+ *     ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
+ *     ONO Region 5, DCPG 16: dsc0
+ *     ONO Region 7, DCPG 17: dsc1
+ *     ONO Region 9, DCPG 18: dsc2
+ *     ONO Region 11, DCPG 19: dsc3
+ *     ONO Region 2, DCPG 24: mpc opp optc dwb
+ *     ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
 void dcn35_hw_block_power_down(struct dc *dc,
        struct pg_block_update *update_state)
 {
@@ -1175,20 +1182,27 @@ void dcn35_hw_block_power_down(struct dc *dc,
        //domain22, 23, 25 currently always on.
 
 }
+
 /**
-        * power up sequence
-        * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
-        * ONO Region 2, DCPG 24: mpc opp optc dwb
-        * ONO Region 5, DCPG 16: dsc0
-        * ONO Region 7, DCPG 17: dsc1
-        * ONO Region 9, DCPG 18: dsc2
-        * ONO Region 11, DCPG 19: dsc3
-        * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
-        * ONO Region 4, DCPG 0: dchubp0, dpp0
-        * ONO Region 6, DCPG 1: dchubp1, dpp1
-        * ONO Region 8, DCPG 2: dchubp2, dpp2
-        * ONO Region 10, DCPG 3: dchubp3, dpp3
-        * ONO Region 3, DCPG 25: hpo - SKIPPED
+ * dcn35_hw_block_power_up() - power up sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power up:
+ *
+ *     ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+ *     ONO Region 2, DCPG 24: mpc opp optc dwb
+ *     ONO Region 5, DCPG 16: dsc0
+ *     ONO Region 7, DCPG 17: dsc1
+ *     ONO Region 9, DCPG 18: dsc2
+ *     ONO Region 11, DCPG 19: dsc3
+ *     ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+ *     ONO Region 4, DCPG 0: dchubp0, dpp0
+ *     ONO Region 6, DCPG 1: dchubp1, dpp1
+ *     ONO Region 8, DCPG 2: dchubp2, dpp2
+ *     ONO Region 10, DCPG 3: dchubp3, dpp3
+ *     ONO Region 3, DCPG 25: hpo - SKIPPED
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
  */
 void dcn35_hw_block_power_up(struct dc *dc,
        struct pg_block_update *update_state)
@@ -1315,3 +1329,44 @@ uint32_t dcn35_get_idle_state(const struct dc *dc)
 
        return 0;
 }
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+               int num_pipes, struct dc_crtc_timing_adjust adjust)
+{
+       int i = 0;
+       struct drr_params params = {0};
+       // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
+       unsigned int event_triggers = 0x800;
+       // Note: DRR trigger events are generated regardless of whether the num_frames threshold is met.
+       unsigned int num_frames = 2;
+
+       params.vertical_total_max = adjust.v_total_max;
+       params.vertical_total_min = adjust.v_total_min;
+       params.vertical_total_mid = adjust.v_total_mid;
+       params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
+
+       for (i = 0; i < num_pipes; i++) {
+               if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
+                       struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+                       struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
+
+                       if (dc->debug.static_screen_wait_frames) {
+                               unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
+
+                               if (frame_rate >= 120 && dc->caps.ips_support &&
+                                       dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
+                                       /*ips enable case*/
+                                       num_frames = 2 * (frame_rate % 60);
+                               }
+                       }
+                       if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
+                               pipe_ctx[i]->stream_res.tg->funcs->set_drr(
+                                       pipe_ctx[i]->stream_res.tg, &params);
+                       if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+                               if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
+                                       pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
+                                               pipe_ctx[i]->stream_res.tg,
+                                               event_triggers, num_frames);
+               }
+       }
+}
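
One detail worth making explicit in the new dcn35_set_drr(): the 0x800 literal is simply bit 11 set, matching the OTG_TRIG_A mapping named in the comment, and static-screen control is only re-armed when a real DRR range (both v_total_min and v_total_max non-zero) is in effect:

    /* 0x800 == 1 << 11: OTG_TRIG_A, per the comment in dcn35_set_drr() */
    unsigned int event_triggers = 1 << 11;
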
index 3837038dc4a8f4a35645dc932401937aa7f32c6a..fd66316e33de367da8c90e3520087fce385ebb5b 100644 (file)
@@ -57,7 +57,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context);
 void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
                               struct dc_state *context);
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
 
 void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
        struct pg_block_update *update_state);
@@ -86,4 +86,8 @@ void dcn35_dsc_pg_control(
 
 void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
 uint32_t dcn35_get_idle_state(const struct dc *dc);
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+               int num_pipes, struct dc_crtc_timing_adjust adjust);
+
 #endif /* __DC_HWSS_DCN35_H__ */
similarity index 99%
rename from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c
rename to drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index d594905eb246fd12fd15745f61201238650eccab..a630aa77dcec036c791c3ae4a75a2e3bedafe2f8 100644 (file)
@@ -68,7 +68,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
        .prepare_bandwidth = dcn35_prepare_bandwidth,
        .optimize_bandwidth = dcn35_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
-       .set_drr = dcn10_set_drr,
+       .set_drr = dcn35_set_drr,
        .get_position = dcn10_get_position,
        .set_static_screen_control = dcn30_set_static_screen_control,
        .setup_stereo = dcn10_setup_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
new file mode 100644 (file)
index 0000000..951ca2d
--- /dev/null
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+  dcn351_init.c
+  dcn351_init.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
new file mode 100644 (file)
index 0000000..b24ad27
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# (c) Copyright 2022 Advanced Micro Devices, Inc. All rights reserved.
+#
+#  All rights reserved.  This notice is intended as a precaution against
+#  inadvertent publication and does not imply publication or any waiver
+#  of confidentiality.  The year included in the foregoing notice is the
+#  year of creation of the work.
+#
+#  Authors: AMD
+#
+# Makefile for DCN351.
+
+DCN351 = dcn351_init.o
+
+AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN351)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
new file mode 100644 (file)
index 0000000..143d3fc
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hwseq.h"
+#include "dcn10/dcn10_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dcn301/dcn301_hwseq.h"
+#include "dcn31/dcn31_hwseq.h"
+#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
+
+#include "dcn351_init.h"
+
+static const struct hw_sequencer_funcs dcn351_funcs = {
+       .program_gamut_remap = dcn30_program_gamut_remap,
+       .init_hw = dcn35_init_hw,
+       .power_down_on_boot = dcn35_power_down_on_boot,
+       .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+       .apply_ctx_for_surface = NULL,
+       .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+       .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+       .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+       .update_plane_addr = dcn20_update_plane_addr,
+       .update_dchub = dcn10_update_dchub,
+       .update_pending_status = dcn10_update_pending_status,
+       .program_output_csc = dcn20_program_output_csc,
+       .enable_accelerated_mode = dce110_enable_accelerated_mode,
+       .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+       .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+       .update_info_frame = dcn31_update_info_frame,
+       .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+       .enable_stream = dcn20_enable_stream,
+       .disable_stream = dce110_disable_stream,
+       .unblank_stream = dcn32_unblank_stream,
+       .blank_stream = dce110_blank_stream,
+       .enable_audio_stream = dce110_enable_audio_stream,
+       .disable_audio_stream = dce110_disable_audio_stream,
+       .disable_plane = dcn35_disable_plane,
+       .disable_pixel_data = dcn20_disable_pixel_data,
+       .pipe_control_lock = dcn20_pipe_control_lock,
+       .interdependent_update_lock = dcn10_lock_all_pipes,
+       .cursor_lock = dcn10_cursor_lock,
+       .prepare_bandwidth = dcn35_prepare_bandwidth,
+       .optimize_bandwidth = dcn35_optimize_bandwidth,
+       .update_bandwidth = dcn20_update_bandwidth,
+       .set_drr = dcn10_set_drr,
+       .get_position = dcn10_get_position,
+       .set_static_screen_control = dcn30_set_static_screen_control,
+       .setup_stereo = dcn10_setup_stereo,
+       .set_avmute = dcn30_set_avmute,
+       .log_hw_state = dcn10_log_hw_state,
+       .get_hw_state = dcn10_get_hw_state,
+       .clear_status_bits = dcn10_clear_status_bits,
+       .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
+       .edp_power_control = dce110_edp_power_control,
+       .edp_wait_for_T12 = dce110_edp_wait_for_T12,
+       .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+       .set_cursor_position = dcn10_set_cursor_position,
+       .set_cursor_attribute = dcn10_set_cursor_attribute,
+       .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+       .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+       .set_clock = dcn10_set_clock,
+       .get_clock = dcn10_get_clock,
+       .program_triplebuffer = dcn20_program_triple_buffer,
+       .enable_writeback = dcn30_enable_writeback,
+       .disable_writeback = dcn30_disable_writeback,
+       .update_writeback = dcn30_update_writeback,
+       .mmhubbub_warmup = dcn30_mmhubbub_warmup,
+       .dmdata_status_done = dcn20_dmdata_status_done,
+       .program_dmdata_engine = dcn30_program_dmdata_engine,
+       .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+       .init_sys_ctx = dcn31_init_sys_ctx,
+       .init_vm_ctx = dcn20_init_vm_ctx,
+       .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+       .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+       .calc_vupdate_position = dcn10_calc_vupdate_position,
+       .power_down = dce110_power_down,
+       .set_backlight_level = dcn21_set_backlight_level,
+       .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+       .set_pipe = dcn21_set_pipe,
+       .enable_lvds_link_output = dce110_enable_lvds_link_output,
+       .enable_tmds_link_output = dce110_enable_tmds_link_output,
+       .enable_dp_link_output = dce110_enable_dp_link_output,
+       .disable_link_output = dcn32_disable_link_output,
+       .z10_restore = dcn35_z10_restore,
+       .z10_save_init = dcn31_z10_save_init,
+       .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+       .optimize_pwr_state = dcn21_optimize_pwr_state,
+       .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+       .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+       .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
+       .update_dsc_pg = dcn32_update_dsc_pg,
+       .calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
+       .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
+       .hw_block_power_up = dcn35_hw_block_power_up,
+       .hw_block_power_down = dcn35_hw_block_power_down,
+       .root_clock_control = dcn35_root_clock_control,
+       .set_idle_state = dcn35_set_idle_state,
+       .get_idle_state = dcn35_get_idle_state
+};
+
+static const struct hwseq_private_funcs dcn351_private_funcs = {
+       .init_pipes = dcn35_init_pipes,
+       .update_plane_addr = dcn20_update_plane_addr,
+       .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+       .update_mpcc = dcn20_update_mpcc,
+       .set_input_transfer_func = dcn32_set_input_transfer_func,
+       .set_output_transfer_func = dcn32_set_output_transfer_func,
+       .power_down = dce110_power_down,
+       .enable_display_power_gating = dcn10_dummy_display_power_gating,
+       .blank_pixel_data = dcn20_blank_pixel_data,
+       .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap,
+       .enable_stream_timing = dcn20_enable_stream_timing,
+       .edp_backlight_control = dce110_edp_backlight_control,
+       .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+       .did_underflow_occur = dcn10_did_underflow_occur,
+       .init_blank = dcn20_init_blank,
+       .disable_vga = NULL,
+       .bios_golden_init = dcn10_bios_golden_init,
+       .plane_atomic_disable = dcn35_plane_atomic_disable,
+       //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
+       //.hubp_pg_control = dcn35_hubp_pg_control,
+       .enable_power_gating_plane = dcn35_enable_power_gating_plane,
+       .dpp_root_clock_control = dcn35_dpp_root_clock_control,
+       .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+       .update_odm = dcn35_update_odm,
+       .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+       .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+       .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+       .dccg_init = dcn20_dccg_init,
+       .set_mcm_luts = dcn32_set_mcm_luts,
+       .setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+       .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
+       .set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
+       .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+       .dsc_pg_control = dcn35_dsc_pg_control,
+       .dsc_pg_status = dcn32_dsc_pg_status,
+       .enable_plane = dcn35_enable_plane,
+};
+
+void dcn351_hw_sequencer_construct(struct dc *dc)
+{
+       dc->hwss = dcn351_funcs;
+       dc->hwseq->funcs = dcn351_private_funcs;
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h
new file mode 100644 (file)
index 0000000..970b010
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN351_INIT_H__
+#define __DC_DCN351_INIT_H__
+
+struct dc;
+
+void dcn351_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN351_INIT_H__ */
index 45dc6d4e956273240f954af6730b6986d24f7cc9..a54399383318145b8bc72fc85e646bf546588609 100644 (file)
@@ -50,7 +50,7 @@ struct pg_block_update;
 struct subvp_pipe_control_lock_fast_params {
        struct dc *dc;
        bool lock;
-       struct pipe_ctx *pipe_ctx;
+       bool subvp_immediate_flip;
 };
 
 struct pipe_control_lock_params {
@@ -200,7 +200,7 @@ struct hw_sequencer_funcs {
                        struct dc_state *context);
        enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
                        struct dc_state *context);
-       void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+       void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
        void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
        void (*apply_ctx_for_surface)(struct dc *dc,
                        const struct dc_stream_state *stream,
@@ -248,6 +248,7 @@ struct hw_sequencer_funcs {
        void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
                        int group_size, struct pipe_ctx *grouped_pipes[]);
        void (*enable_timing_synchronization)(struct dc *dc,
+                       struct dc_state *state,
                        int group_index, int group_size,
                        struct pipe_ctx *grouped_pipes[]);
        void (*enable_vblanks_synchronization)(struct dc *dc,
@@ -454,17 +455,18 @@ void get_mpctree_visual_confirm_color(
                struct tg_color *color);
 
 void get_subvp_visual_confirm_color(
-       struct dc *dc,
-       struct dc_state *context,
        struct pipe_ctx *pipe_ctx,
        struct tg_color *color);
 
 void get_mclk_switch_visual_confirm_color(
-               struct dc *dc,
-               struct dc_state *context,
                struct pipe_ctx *pipe_ctx,
                struct tg_color *color);
 
+void set_p_state_switch_method(
+               struct dc *dc,
+               struct dc_state *context,
+               struct pipe_ctx *pipe_ctx);
+
 void hwss_execute_sequence(struct dc *dc,
                struct block_sequence block_sequence[],
                int num_steps);
@@ -474,7 +476,8 @@ void hwss_build_fast_sequence(struct dc *dc,
                unsigned int dmub_cmd_count,
                struct block_sequence block_sequence[],
                int *num_steps,
-               struct pipe_ctx *pipe_ctx);
+               struct pipe_ctx *pipe_ctx,
+               struct dc_stream_status *stream_status);
 
 void hwss_send_dmcub_cmd(union block_sequence_params *params);
 
index 82c5921668754c54a9379f0a6cb5bd4a780bab66..6137cf09aa54d25750246e86583c5938e557501b 100644 (file)
@@ -79,6 +79,7 @@ struct hwseq_private_funcs {
        void (*update_plane_addr)(const struct dc *dc,
                        struct pipe_ctx *pipe_ctx);
        void (*plane_atomic_disconnect)(struct dc *dc,
+                       struct dc_state *state,
                        struct pipe_ctx *pipe_ctx);
        void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
        bool (*set_input_transfer_func)(struct dc *dc,
index 10397d4dfb0753fb81f971b6280a0f9d817e5f23..f74ae0d41d3c49cf215d615f336339b773cbbcbc 100644 (file)
@@ -200,11 +200,7 @@ struct resource_funcs {
                        unsigned int pipe_cnt,
             unsigned int index);
 
-       bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update);
-       void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);
        void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
-       void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
-       void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
        void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
 };
 
@@ -385,6 +381,16 @@ union pipe_update_flags {
        uint32_t raw;
 };
 
+enum p_state_switch_method {
+       P_STATE_UNKNOWN                                         = 0,
+       P_STATE_V_BLANK                                         = 1,
+       P_STATE_FPO,
+       P_STATE_V_ACTIVE,
+       P_STATE_SUB_VP,
+       P_STATE_DRR_SUB_VP,
+       P_STATE_V_BLANK_SUB_VP
+};
+
 struct pipe_ctx {
        struct dc_plane_state *plane_state;
        struct dc_stream_state *stream;
@@ -433,6 +439,7 @@ struct pipe_ctx {
        struct dwbc *dwbc;
        struct mcif_wb *mcif_wb;
        union pipe_update_flags update_flags;
+       enum p_state_switch_method p_state_type;
        struct tg_color visual_confirm_color;
        bool has_vactive_margin;
        /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
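
Together with the slimmed-down visual-confirm getters above, the new p_state_type field lets a pipe carry its own P-state method, stamped by set_p_state_switch_method() during programming, so consumers no longer need dc and context. A hypothetical consumer keying a confirm color off the pipe alone (the color assignments are a guess, not from the patch):

    switch (pipe_ctx->p_state_type) {
    case P_STATE_SUB_VP:
            color->color_r_cr = MAX_TG_COLOR_VALUE;   /* e.g. red for SubVP */
            break;
    case P_STATE_FPO:
            color->color_g_y = MAX_TG_COLOR_VALUE;    /* e.g. green for FPO */
            break;
    default:
            break;
    }
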
@@ -526,6 +533,14 @@ struct dc_state {
         * @stream_status: Planes status on a given stream
         */
        struct dc_stream_status stream_status[MAX_PIPES];
+       /**
+        * @phantom_streams: Stream state properties for phantoms
+        */
+       struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+       /**
+        * @phantom_planes: Planes state properties for phantoms
+        */
+       struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
 
        /**
         * @stream_count: Total of streams in use
@@ -533,6 +548,14 @@ struct dc_state {
        uint8_t stream_count;
        uint8_t stream_mask;
 
+       /**
+        * @phantom_stream_count: Total phantom streams in use
+        */
+       uint8_t phantom_stream_count;
+       /**
+        * @phantom_plane_count: Total phantom planes in use
+        */
+       uint8_t phantom_plane_count;
        /**
         * @res_ctx: Persistent state of resources
         */
index 9f521cf0fc5a2b4a629ccf69d1656be0d4da4add..3f0161d6467556720592b842059f0424237bf1dc 100644 (file)
@@ -36,7 +36,7 @@ struct abm {
 };
 
 struct abm_funcs {
-       void (*abm_init)(struct abm *abm, uint32_t back_light);
+       void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level);
        bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
        bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
        bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
index fa9614bcb1605f6b75f116ca5387df338bc3dc4c..17e014d3bdc8401893847a4f0fd9670d664f65c5 100644 (file)
@@ -62,6 +62,25 @@ struct dcn3_clk_internal {
        uint32_t CLK4_CLK0_CURRENT_CNT; //fclk
 };
 
+struct dcn35_clk_internal {
+       int dummy;
+       uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
+       uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk
+       uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk
+       uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk
+       uint32_t CLK1_CLK4_CURRENT_CNT; //dtbclk
+       //uint32_t CLK1_CLK5_CURRENT_CNT; //dpiaclk
+       //uint32_t CLK1_CLK6_CURRENT_CNT; //srdbgclk
+       uint32_t CLK1_CLK3_DS_CNTL;         //dcf_deep_sleep_divider
+       uint32_t CLK1_CLK3_ALLOW_DS;    //dcf_deep_sleep_allow
+
+       uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass
+       uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass
+       uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass
+       uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass
+       uint32_t CLK1_CLK4_BYPASS_CNTL; //dtbclk bypass
+};
+
 struct dcn301_clk_internal {
        int dummy;
        uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
@@ -314,6 +333,7 @@ struct clk_mgr {
        bool force_smu_not_present;
        bool dc_mode_softmax_enabled;
        int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figure out where this goes
+       int dp_dto_source_clock_in_khz; // Used to program DP DTO with ss adjustment on DCN314
        int dentist_vco_freq_khz;
        struct clk_state_registers_and_bypass boot_snapshot;
        struct clk_bw_params *bw_params;
index b95ae9596c3b1e541535291d5c09f04132512574..dcae23faeee3d322b64ba4d0bb7fcb1ad7a3c65c 100644 (file)
@@ -43,6 +43,7 @@
  * to be used inside loops and for determining array sizes.
  */
 #define MAX_PIPES 6
+#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
 #define MAX_DIG_LINK_ENCODERS 7
 #define MAX_DWB_PIPES  1
 #define MAX_HPO_DP2_ENCODERS   4
index 248adc1705e3578611ed92b944db2e99b017127f..5dcbaa2db964aee7de17c2e9306606cac1817b08 100644 (file)
@@ -40,6 +40,7 @@ struct panel_cntl_backlight_registers {
        unsigned int BL_PWM_PERIOD_CNTL;
        unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
        unsigned int PANEL_PWRSEQ_REF_DIV2;
+       unsigned int USER_LEVEL;
 };
 
 struct panel_cntl_funcs {
index 7439865d1b50c467fe6af1f0148f31d9cb423a61..26fe81f213da55d39aa7edbb387328192d42685b 100644 (file)
@@ -289,6 +289,8 @@ struct link_service {
        bool (*edp_replay_residency)(const struct dc_link *link,
                        unsigned int *residency, const bool is_start,
                        const bool is_alpm);
+       bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
+                       const unsigned int *power_opts, uint16_t coasting_vtotal);
 
        bool (*edp_wait_for_t12)(struct dc_link *link);
        bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
index 0458d2d749f473adb64f7fb1197fe4ce2b34a575..c958ef37b78a667b1bb9bfb26827ae3e45053715 100644 (file)
@@ -573,9 +573,6 @@ void update_audio_usage(
 
 unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
 
-void get_audio_check(struct audio_info *aud_modes,
-       struct audio_check *aud_chk);
-
 bool get_temp_dp_link_res(struct dc_link *link,
                struct link_resource *link_res,
                struct dc_link_settings *link_settings);
@@ -622,5 +619,4 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
                struct pipe_ctx *pipe_ctx);
 
 bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
index 5fe8b4871c77614eb0fd46421db49fb79197e6f7..3cbfbf8d107e9b62c639ef1618041b8fc09dd9b5 100644 (file)
@@ -900,11 +900,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
 {
        struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
        struct dc_stream_state *stream = pipe_ctx->stream;
-       DC_LOGGER_INIT(dsc->ctx->logger);
 
-       if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+       if (!pipe_ctx->stream->timing.flags.DSC)
                return false;
 
+       if (!dsc)
+               return false;
+
+       DC_LOGGER_INIT(dsc->ctx->logger);
+
        if (enable) {
                struct dsc_config dsc_cfg;
                uint8_t dsc_packed_pps[128];
@@ -2005,17 +2009,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
                }
        }
 
-       /*
-        * If the link is DP-over-USB4 do the following:
-        * - Train with fallback when enabling DPIA link. Conventional links are
+       /* Train with fallback when enabling DPIA link. Conventional links are
         * trained with fallback during sink detection.
-        * - Allocate only what the stream needs for bw in Gbps. Inform the CM
-        * in case stream needs more or less bw from what has been allocated
-        * earlier at plug time.
         */
-       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
                do_fallback = true;
-       }
 
        /*
         * Temporary w/a to get DP2.0 link rates to work with SST.
@@ -2197,6 +2195,32 @@ static enum dc_status enable_link(
        return status;
 }
 
+static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
+{
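+       /* per-stream USB4 BW allocation is a placeholder for now; always report success */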
+       return true;
+}
+
+static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+       bool ret;
+
+       int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+                       dc_link_get_highest_encoding_format(stream->sink->link));
+
+       ret = allocate_usb4_bandwidth_for_stream(stream, bw);
+
+       return ret;
+}
+
+static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+       bool ret;
+
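+       /* deallocation is modeled as a request for 0 kbps */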
+       ret = allocate_usb4_bandwidth_for_stream(stream, 0);
+
+       return ret;
+}
+
 void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
 {
        struct dc  *dc = pipe_ctx->stream->ctx->dc;
@@ -2232,6 +2256,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
        update_psp_stream_config(pipe_ctx, true);
        dc->hwss.blank_stream(pipe_ctx);
 
+       if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+               deallocate_usb4_bandwidth(pipe_ctx->stream);
+
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
        else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
@@ -2474,6 +2501,9 @@ void link_set_dpms_on(
                }
        }
 
+       if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+               allocate_usb4_bandwidth(pipe_ctx->stream);
+
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                allocate_mst_payload(pipe_ctx);
        else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
index 5464d8d26bd371e7df97d0599275dbb705fb5fc9..37d3027c32dcb1007dbb90e209f7f459be81617e 100644 (file)
@@ -216,6 +216,7 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
        link_srv->edp_send_replay_cmd = edp_send_replay_cmd;
        link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal;
        link_srv->edp_replay_residency = edp_replay_residency;
+       link_srv->edp_set_replay_power_opt_and_coasting_vtotal = edp_set_replay_power_opt_and_coasting_vtotal;
 
        link_srv->edp_wait_for_t12 = edp_wait_for_t12;
        link_srv->edp_is_ilr_optimization_required =
index b45fda96eaf649bf16f291df2294d787680e0287..8fe66c3678508d9aee6779fa25cd6128e1f30832 100644 (file)
@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
        return DC_OK;
 }
 
+/*
+ * This function calculates the bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective dpia link
+ *
+ * @stream: pointer to the dc_stream_state struct instance
+ * @num_streams: number of streams to be validated
+ *
+ * return: true if validation succeeded
+ */
 bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
 {
-       bool ret = true;
-       int bw_needed[MAX_DPIA_NUM];
-       struct dc_link *link[MAX_DPIA_NUM];
-
-       if (!num_streams || num_streams > MAX_DPIA_NUM)
-               return ret;
+       int bw_needed[MAX_DPIA_NUM] = {0};
+       struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
+       int num_dpias = 0;
 
        for (uint8_t i = 0; i < num_streams; ++i) {
+               if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
+                       /* new dpia sst stream, check whether it exceeds max dpia */
+                       if (num_dpias >= MAX_DPIA_NUM)
+                               return false;
 
-               link[i] = stream[i].link;
-               bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
-                               dc_link_get_highest_encoding_format(link[i]));
+                       dpia_link[num_dpias] = stream[i].link;
+                       bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+                                       dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
+                       num_dpias++;
+               } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+                       uint8_t j = 0;
+                       /* check whether it's a known dpia link */
+                       for (; j < num_dpias; ++j) {
+                               if (dpia_link[j] == stream[i].link)
+                                       break;
+                       }
+
+                       if (j == num_dpias) {
+                               /* new dpia mst stream, check whether it exceeds max dpia */
+                               if (num_dpias >= MAX_DPIA_NUM)
+                                       return false;
+                               else {
+                                       dpia_link[j] = stream[i].link;
+                                       num_dpias++;
+                               }
+                       }
+
+                       bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+                               dc_link_get_highest_encoding_format(dpia_link[j]));
+               }
        }
 
-       ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+       /* Include dp overheads */
+       for (uint8_t i = 0; i < num_dpias; ++i) {
+               int dp_overhead = 0;
+
+               dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
+               bw_needed[i] += dp_overhead;
+       }
 
-       return ret;
+       return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
 }
index 4a954317d0daccebc10278399615db202e6f9333..595fb05946e9d13f52b5052d6167fca1ed401038 100644 (file)
@@ -25,6 +25,7 @@
 #ifndef __LINK_VALIDATION_H__
 #define __LINK_VALIDATION_H__
 #include "link.h"
+
 enum dc_status link_validate_mode_timing(
                const struct dc_stream_state *stream,
                struct dc_link *link,
index 3c5334cdb3fb942b8e16a47a13097ba4e573c363..289f5d1333424b02ab18efb3542dc740ae3d70a7 100644 (file)
@@ -1398,7 +1398,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
        cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
        cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
                        link->dc, link->link_enc->transmitter);
-       if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+       if (dc_wake_and_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
                        cmd.cable_id.header.ret_status == 1) {
                cable_id->raw = cmd.cable_id.data.output_raw;
                DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
index 0bb7491339098a5ee99ae8e210dc04f6fb4c61c2..6af42ba9885c054ead528e11d14007622af098a8 100644 (file)
@@ -82,24 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link)
 {
        union dmub_rb_cmd cmd = {0};
        struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
-       bool is_hpd_high = false;
 
        /* prepare QUERY_HPD command */
        cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
        cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
        cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
 
-       /* Return HPD status reported by DMUB if query successfully executed. */
-       if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
-               is_hpd_high = cmd.query_hpd.data.result;
-
-       DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
-               __func__,
-               link->link_index,
-               link->link_id.enum_id - ENUM_ID_1,
-               cmd.query_hpd.data.status,
-               cmd.query_hpd.data.result);
-
-       return is_hpd_high;
+       /* Query dpia hpd status from dmub */
+       if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd,
+               DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+           cmd.query_hpd.data.status == AUX_RET_SUCCESS) {
+               DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n",
+                       __func__,
+                       link->link_index,
+                       link->link_id.enum_id - ENUM_ID_1,
+                       link->hpd_status,
+                       cmd.query_hpd.data.result);
+               link->hpd_status = cmd.query_hpd.data.result;
+       } else {
+               DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n",
+                       __func__,
+                       link->link_index,
+                       link->link_id.enum_id - ENUM_ID_1,
+                       cmd.query_hpd.data.status,
+                       link->hpd_status);
+               link->hpd_status = false;
+       }
+
+       return link->hpd_status;
 }
 
index 7581023daa4789ec24a6a11fe3519c63c41dea05..dd0d2b206462c927c5f68b355498e71250c154b9 100644 (file)
@@ -50,15 +50,28 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
                        && tmp->hpd_status
                        && tmp->dpia_bw_alloc_config.bw_alloc_enabled);
 }
+
 static void reset_bw_alloc_struct(struct dc_link *link)
 {
        link->dpia_bw_alloc_config.bw_alloc_enabled = false;
-       link->dpia_bw_alloc_config.sink_verified_bw = 0;
-       link->dpia_bw_alloc_config.sink_max_bw = 0;
+       link->dpia_bw_alloc_config.link_verified_bw = 0;
+       link->dpia_bw_alloc_config.link_max_bw = 0;
+       link->dpia_bw_alloc_config.allocated_bw = 0;
        link->dpia_bw_alloc_config.estimated_bw = 0;
        link->dpia_bw_alloc_config.bw_granularity = 0;
+       link->dpia_bw_alloc_config.dp_overhead = 0;
        link->dpia_bw_alloc_config.response_ready = false;
+       link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
+       link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
+       for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
+               link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
+       DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
 }
+
+#define BW_GRANULARITY_0 4 // 0.25 Gbps
+#define BW_GRANULARITY_1 2 // 0.5 Gbps
+#define BW_GRANULARITY_2 1 // 1 Gbps
+
 static uint8_t get_bw_granularity(struct dc_link *link)
 {
        uint8_t bw_granularity = 0;
@@ -71,16 +84,20 @@ static uint8_t get_bw_granularity(struct dc_link *link)
 
        switch (bw_granularity & 0x3) {
        case 0:
-               bw_granularity = 4;
+               bw_granularity = BW_GRANULARITY_0;
                break;
        case 1:
+               bw_granularity = BW_GRANULARITY_1;
+               break;
+       case 2:
        default:
-               bw_granularity = 2;
+               bw_granularity = BW_GRANULARITY_2;
                break;
        }
 
        return bw_granularity;
 }
+
 static int get_estimated_bw(struct dc_link *link)
 {
        uint8_t bw_estimated_bw = 0;
@@ -93,31 +110,33 @@ static int get_estimated_bw(struct dc_link *link)
 
        return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
 }
-static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
+
+static int get_non_reduced_max_link_rate(struct dc_link *link)
 {
-       if (bw_needed > 0)
-               *stream_allocated_bw += bw_needed;
+       uint8_t nrd_max_link_rate = 0;
 
-       return true;
+       core_link_read_dpcd(
+                       link,
+                       DP_TUNNELING_MAX_LINK_RATE,
+                       &nrd_max_link_rate,
+                       sizeof(uint8_t));
+
+       return nrd_max_link_rate;
 }
-static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
-{
-       bool ret = false;
 
-       if (*stream_allocated_bw > 0) {
-               *stream_allocated_bw -= bw_to_dealloc;
-               ret = true;
-       } else {
-               //Do nothing for now
-               ret = true;
-       }
+static int get_non_reduced_max_lane_count(struct dc_link *link)
+{
+       uint8_t nrd_max_lane_count = 0;
 
-       // Unplug so reset values
-       if (!link->hpd_status)
-               reset_bw_alloc_struct(link);
+       core_link_read_dpcd(
+                       link,
+                       DP_TUNNELING_MAX_LANE_COUNT,
+                       &nrd_max_lane_count,
+                       sizeof(uint8_t));
 
-       return ret;
+       return nrd_max_lane_count;
 }
+
 /*
  * Read all New BW alloc configuration, e.g. estimated_bw, allocated_bw,
  * granularity, Driver_ID, CM_Group, and populate the BW allocation structs
@@ -125,10 +144,22 @@ static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, stru
  */
 static void init_usb4_bw_struct(struct dc_link *link)
 {
-       // Init the known values
+       reset_bw_alloc_struct(link);
+
+       /* init the known values */
        link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
        link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+       link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
+       link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
+
+       DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
+               __func__, link->dpia_bw_alloc_config.bw_granularity,
+               link->dpia_bw_alloc_config.estimated_bw);
+       DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
+               __func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
+               link->dpia_bw_alloc_config.nrd_max_lane_count);
 }
+
 static uint8_t get_lowest_dpia_index(struct dc_link *link)
 {
        const struct dc *dc_struct = link->dc;
@@ -141,51 +172,66 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
                                dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
                        continue;
 
-               if (idx > dc_struct->links[i]->link_index)
+               if (idx > dc_struct->links[i]->link_index) {
                        idx = dc_struct->links[i]->link_index;
+                       break;
+               }
        }
 
        return idx;
 }
+
 /*
- * Get the Max Available BW or Max Estimated BW for each Host Router
+ * Get the maximum dp tunnel bandwidth of the host router
  *
- * @link: pointer to the dc_link struct instance
- * @type: ESTIMATD BW or MAX AVAILABLE BW
+ * @dc: pointer to the dc struct instance
+ * @hr_index: host router index
  *
- * return: response_ready flag from dc_link struct
+ * return: host router maximum dp tunnel bandwidth
  */
-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
 {
-       const struct dc *dc_struct = link->dc;
-       uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
-       uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
-       struct dc_link *link_temp;
+       uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
+       uint8_t hr_index_temp = 0;
+       struct dc_link *link_dpia_primary, *link_dpia_secondary;
        int total_bw = 0;
-       int i;
-
-       for (i = 0; i < MAX_PIPES * 2; ++i) {
 
-               if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
-                       continue;
+       for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
 
-               link_temp = dc_struct->links[i];
-               if (!link_temp || !link_temp->hpd_status)
+               if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
                        continue;
 
-               idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
-
-               if (idx_temp == idx) {
-
-                       if (type == HOST_ROUTER_BW_ESTIMATED)
-                               total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
-                       else if (type == HOST_ROUTER_BW_ALLOCATED)
-                               total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
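+               /* DPIAs are paired per host router: consecutive link indices share one HR */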
+               hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
+
+               if (hr_index_temp == hr_index) {
+                       link_dpia_primary = dc->links[i];
+                       link_dpia_secondary = dc->links[i + 1];
+
+                       /**
+                        * If BW allocation enabled on both DPIAs, then
+                        * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
+                        * otherwise HR BW = Estimated(bw alloc enabled dpia)
+                        */
+                       if ((link_dpia_primary->hpd_status &&
+                               link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
+                               (link_dpia_secondary->hpd_status &&
+                               link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
+                                       total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+                                               link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
+                       } else if (link_dpia_primary->hpd_status &&
+                                       link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
+                               total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
+                       } else if (link_dpia_secondary->hpd_status &&
+                               link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
+                               total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
+                       }
+                       break;
                }
        }
 
        return total_bw;
 }
+
 /*
  * Cleanup function for when the dpia is unplugged to reset struct
  * and perform any required clean up
@@ -194,42 +240,49 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
  *
  * return: none
  */
-static bool dpia_bw_alloc_unplug(struct dc_link *link)
+static void dpia_bw_alloc_unplug(struct dc_link *link)
 {
-       if (!link)
-               return true;
-
-       return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
-                       link->dpia_bw_alloc_config.sink_allocated_bw, link);
+       if (link) {
+               DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
+                       __func__, link->link_index);
+               reset_bw_alloc_struct(link);
+       }
 }
+
 static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
 {
        uint8_t requested_bw;
        uint32_t temp;
 
-       // 1. Add check for this corner case #1
-       if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
+       /* Error check whether request bw is greater than estimated bw */
+       if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
+               DC_LOG_ERROR("%s: Request bw greater than estimated bw for link(%d)\n",
+                       __func__, link->link_index);
                req_bw = link->dpia_bw_alloc_config.estimated_bw;
+       }
 
        temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
        requested_bw = temp / Kbps_TO_Gbps;
 
-       // Always make sure to add more to account for floating points
+       /* Always round up to account for the fractional remainder */
        if (temp % Kbps_TO_Gbps)
                ++requested_bw;
 
-       // 2. Add check for this corner case #2
+       /* Error check whether requested and allocated are equal */
        req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-       if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
-               return;
+       if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+               DC_LOG_ERROR("%s: Request bw equals allocated bw for link(%d)\n",
+                       __func__, link->link_index);
+       }
 
-       if (core_link_write_dpcd(
+       link->dpia_bw_alloc_config.response_ready = false; // Reset flag
+       core_link_write_dpcd(
                link,
                REQUESTED_BW,
                &requested_bw,
-               sizeof(uint8_t)) == DC_OK)
-               link->dpia_bw_alloc_config.response_ready = false; // Reset flag
+               sizeof(uint8_t));
 }
+
 /*
  * Return the response_ready flag from dc_link struct
  *
@@ -241,6 +294,7 @@ static bool get_cm_response_ready_flag(struct dc_link *link)
 {
        return link->dpia_bw_alloc_config.response_ready;
 }
+
 // ------------------------------------------------------------------
 //                                     PUBLIC FUNCTIONS
 // ------------------------------------------------------------------
@@ -277,27 +331,27 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
                                DPTX_BW_ALLOCATION_MODE_CONTROL,
                                &response,
                                sizeof(uint8_t)) != DC_OK) {
-                       DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
-                                       __func__);
+                       DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
+                               __func__, link->link_index);
                } else {
                        // SUCCESS Enabled DPtx BW Allocation Mode Support
-                       link->dpia_bw_alloc_config.bw_alloc_enabled = true;
-                       DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n",
-                                       __func__);
+                       DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
+                               __func__, link->link_index);
 
                        ret = true;
                        init_usb4_bw_struct(link);
+                       link->dpia_bw_alloc_config.bw_alloc_enabled = true;
                }
        }
 
 out:
        return ret;
 }
+
 void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
 {
        int bw_needed = 0;
        int estimated = 0;
-       int host_router_total_estimated_bw = 0;
 
        if (!get_bw_alloc_proceed_flag((link)))
                return;
@@ -306,14 +360,22 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
 
        case DPIA_BW_REQ_FAILED:
 
-               DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
+               /*
+                * Ideally, we shouldn't run into this case as we always validate available
+                * bandwidth and request within that limit
+                */
+               estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+               DC_LOG_ERROR("%s: BW REQ FAILURE for DP-TX Request for link(%d)\n",
+                       __func__, link->link_index);
+               DC_LOG_ERROR("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
+                       __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
 
-               // Update the new Estimated BW value updated by CM
-               link->dpia_bw_alloc_config.estimated_bw =
-                               bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+               /* Store the new estimated BW value reported by the CM */
+               link->dpia_bw_alloc_config.estimated_bw = estimated;
 
+               /* Re-request using the new estimated bandwidth */
                set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
-               link->dpia_bw_alloc_config.response_ready = false;
 
                /*
                 * If FAIL then it is either:
@@ -326,68 +388,34 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
 
        case DPIA_BW_REQ_SUCCESS:
 
-               DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
-
-               // 1. SUCCESS 1st time before any Pruning is done
-               // 2. SUCCESS after prev. FAIL before any Pruning is done
-               // 3. SUCCESS after Pruning is done but before enabling link
-
                bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
 
-               // 1.
-               if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
-
-                       allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link);
-                       link->dpia_bw_alloc_config.sink_verified_bw =
-                                       link->dpia_bw_alloc_config.sink_allocated_bw;
-
-                       // SUCCESS from first attempt
-                       if (link->dpia_bw_alloc_config.sink_allocated_bw >
-                       link->dpia_bw_alloc_config.sink_max_bw)
-                               link->dpia_bw_alloc_config.sink_verified_bw =
-                                               link->dpia_bw_alloc_config.sink_max_bw;
-               }
-               // 3.
-               else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
-
-                       // Find out how much do we need to de-alloc
-                       if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed)
-                               deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
-                                               link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link);
-                       else
-                               allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
-                                               bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
-               }
+               DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
+                       __func__, link->link_index);
+               DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
+                       __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
 
-               // 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
-               // => check if estimated_bw changed
+               link->dpia_bw_alloc_config.allocated_bw = bw_needed;
 
                link->dpia_bw_alloc_config.response_ready = true;
                break;
 
        case DPIA_EST_BW_CHANGED:
 
-               DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
-
                estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-               host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
 
-               // 1. If due to unplug of other sink
-               if (estimated == host_router_total_estimated_bw) {
-                       // First update the estimated & max_bw fields
-                       if (link->dpia_bw_alloc_config.estimated_bw < estimated)
-                               link->dpia_bw_alloc_config.estimated_bw = estimated;
-               }
-               // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
-               else {
-                       // We lost estimated bw usually due to plug event of other dpia
-                       link->dpia_bw_alloc_config.estimated_bw = estimated;
-               }
+               DC_LOG_DEBUG("%s: ESTIMATED BW CHANGED for link(%d)\n",
+                       __func__, link->link_index);
+               DC_LOG_DEBUG("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
+                       __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
+
+               link->dpia_bw_alloc_config.estimated_bw = estimated;
                break;
 
        case DPIA_BW_ALLOC_CAPS_CHANGED:
 
-               DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
+               DC_LOG_ERROR("%s: BW ALLOC CAPABILITY CHANGED to Disabled for link(%d)\n",
+                       __func__, link->link_index);
                link->dpia_bw_alloc_config.bw_alloc_enabled = false;
                break;
        }
@@ -405,21 +433,21 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
        if (link->hpd_status && peak_bw > 0) {
 
                // If DP over USB4 then we need to check BW allocation
-               link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
-               set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+               link->dpia_bw_alloc_config.link_max_bw = peak_bw;
+               set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
 
                do {
-                       if (!(timeout > 0))
+                       if (timeout > 0)
                                timeout--;
                        else
                                break;
-                       fsleep(10 * 1000);
+                       msleep(10);
                } while (!get_cm_response_ready_flag(link));
 
                if (!timeout)
                        ret = 0; // ERROR TIMEOUT waiting for response for allocating bw
-               else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
-                       ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+               else if (link->dpia_bw_alloc_config.allocated_bw > 0)
+                       ret = link->dpia_bw_alloc_config.allocated_bw;
        }
        //2. Cold Unplug
        else if (!link->hpd_status)
@@ -428,65 +456,102 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
 out:
        return ret;
 }
-int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
 {
-       int ret = 0;
+       bool ret = false;
        uint8_t timeout = 10;
 
+       DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+               __func__, link->link_index, link->hpd_status,
+               link->dpia_bw_alloc_config.allocated_bw, req_bw);
+
        if (!get_bw_alloc_proceed_flag(link))
                goto out;
 
-       /*
-        * Sometimes stream uses same timing parameters as the already
-        * allocated max sink bw so no need to re-alloc
-        */
-       if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) {
-               set_usb4_req_bw_req(link, req_bw);
-               do {
-                       if (!(timeout > 0))
-                               timeout--;
-                       else
-                               break;
-                       udelay(10 * 1000);
-               } while (!get_cm_response_ready_flag(link));
+       set_usb4_req_bw_req(link, req_bw);
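+       /* wait up to ~100 ms (10 x 10 ms) for the CM response */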
+       do {
+               if (timeout > 0)
+                       timeout--;
+               else
+                       break;
+               msleep(10);
+       } while (!get_cm_response_ready_flag(link));
 
-               if (!timeout)
-                       ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
-               else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
-                       ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
-       }
+       if (timeout)
+               ret = true;
 
 out:
+       DC_LOG_DEBUG("%s: EXIT: timeout(%d), ret(%d)\n", __func__, timeout, ret);
        return ret;
 }
+
 bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
 {
        bool ret = true;
-       int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
-       uint8_t lowest_dpia_index = 0, dpia_index = 0;
-       uint8_t i;
+       int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
+       uint8_t lowest_dpia_index, i, hr_index;
 
        if (!num_dpias || num_dpias > MAX_DPIA_NUM)
                return ret;
 
-       //Get total Host Router BW & Validate against each Host Router max BW
+       lowest_dpia_index = get_lowest_dpia_index(link[0]);
+
+       /* get total Host Router BW with granularity for the given modes */
        for (i = 0; i < num_dpias; ++i) {
+               int granularity_Gbps = 0;
+               int bw_granularity = 0;
 
                if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
                        continue;
 
-               lowest_dpia_index = get_lowest_dpia_index(link[i]);
                if (link[i]->link_index < lowest_dpia_index)
                        continue;
 
-               dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
-               bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
-               if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
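+               /* round the needed bw up to the next granularity multiple */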
+               granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
+               bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
+                               ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
 
-                       ret = false;
-                       break;
+               hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
+               bw_needed_per_hr[hr_index] += bw_granularity;
+       }
+
+       /* validate against each Host Router max BW */
+       for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
+               if (bw_needed_per_hr[hr_index]) {
+                       host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
+                       if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
+                               ret = false;
+                               break;
+                       }
                }
        }
 
        return ret;
 }
+
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
+{
+       int dp_overhead = 0, link_mst_overhead = 0;
+
+       if (!get_bw_alloc_proceed_flag((link)))
+               return dp_overhead;
+
+       /* if it's an mst link, add MTPH overhead */
+       if ((link->type == dc_connection_mst_branch) &&
+               !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+               /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
+                * MST overhead is 1/64 of link bandwidth (excluding any overhead)
+                */
+               const struct dc_link_settings *link_cap =
+                       dc_link_get_link_cap(link);
+               uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+                                          (uint32_t)link_cap->lane_count *
+                                          LINK_RATE_REF_FREQ_IN_KHZ * 8;
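+               /* ceil: reserve one of the 64 MTP time slots for the MTP header */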
+               link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+       }
+
+       /* add all the overheads */
+       dp_overhead = link_mst_overhead;
+
+       return dp_overhead;
+}
index 7292690383ae1fe55188d58e285df83a81868d7a..3b6d8494f9d5da4ceb05711c9596007ac73f08a2 100644 (file)
@@ -59,9 +59,9 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
  * @link: pointer to the dc_link struct instance
  * @req_bw: Bw requested by the stream
  *
- * return: allocated bw else return 0
+ * return: true if allocated successfully
  */
-int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
 
 /*
  * Handle the USB4 BW Allocation related functionality here:
@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
  */
 bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
 
+/*
+ * Obtain all the DP overheads in dp tunneling for the dpia link
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: DP overheads in DP tunneling
+ */
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+
 #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
index 90339c2dfd84871bfc15a4169b20c01b7687aa97..5a0b0451895690d184ec00c56873f0d1acad6864 100644 (file)
@@ -807,7 +807,7 @@ void dp_decide_lane_settings(
                const struct link_training_settings *lt_settings,
                const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
                struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
-               union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+               union dpcd_training_lane *dpcd_lane_settings)
 {
        uint32_t lane;
 
index 7d027bac82551dd18a0f5faa878012441c66a818..851bd17317a0c4b05c5407e954e12a767c175111 100644 (file)
@@ -111,7 +111,7 @@ void dp_decide_lane_settings(
        const struct link_training_settings *lt_settings,
        const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
        struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
-       union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+       union dpcd_training_lane *dpcd_lane_settings);
 
 enum dc_dp_training_pattern decide_cr_training_pattern(
                const struct dc_link_settings *link_settings);
index bf53a86ea81718163a4746d8e6eadc1b60678889..046d3e205415311cd63a98aa3c0e59c8aaea2e89 100644 (file)
@@ -529,6 +529,9 @@ bool edp_set_backlight_level(const struct dc_link *link,
        if (dc_is_embedded_signal(link->connector_signal)) {
                struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
 
+               if (link->panel_cntl)
+                       link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16;
+
                if (pipe_ctx) {
                        /* Disable brightness ramping when the display is blanked
                         * as it can hang the DMCU
@@ -927,8 +930,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
 bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
 {
        /* To-do: Setup Replay */
-       struct dc *dc = link->ctx->dc;
-       struct dmub_replay *replay = dc->res_pool->replay;
+       struct dc *dc;
+       struct dmub_replay *replay;
        int i;
        unsigned int panel_inst;
        struct replay_context replay_context = { 0 };
@@ -944,6 +947,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
        if (!link)
                return false;
 
+       dc = link->ctx->dc;
+
+       replay = dc->res_pool->replay;
+
        if (!replay)
                return false;
 
@@ -972,8 +979,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
 
        replay_context.line_time_in_ns = lineTimeInNs;
 
-       if (replay)
-               link->replay_settings.replay_feature_enabled =
+       link->replay_settings.replay_feature_enabled =
                        replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
        if (link->replay_settings.replay_feature_enabled) {
 
@@ -1065,6 +1071,33 @@ bool edp_replay_residency(const struct dc_link *link,
        return true;
 }
 
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+       const unsigned int *power_opts, uint16_t coasting_vtotal)
+{
+       struct dc  *dc = link->ctx->dc;
+       struct dmub_replay *replay = dc->res_pool->replay;
+       unsigned int panel_inst;
+
+       if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+               return false;
+
+       /* Only when both the power opt and coasting vtotal change can this function return true */
+       if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
+               coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+               if (link->replay_settings.replay_feature_enabled &&
+                       replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
+                       replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
+                               *power_opts, panel_inst, coasting_vtotal);
+                       link->replay_settings.replay_power_opt_active = *power_opts;
+                       link->replay_settings.coasting_vtotal = coasting_vtotal;
+               } else
+                       return false;
+       } else
+               return false;
+
+       return true;
+}
+
 static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 {
        int i;
index b7493ff4fceefe049ff79499a47bc8caeb527c17..34e521af7bb482260539bcf66821741531ab17af 100644 (file)
@@ -63,6 +63,8 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
 bool edp_replay_residency(const struct dc_link *link,
        unsigned int *residency, const bool is_start, const bool is_alpm);
 bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+       const unsigned int *power_opts, uint16_t coasting_vtotal);
 bool edp_wait_for_t12(struct dc_link *link);
 bool edp_is_ilr_optimization_required(struct dc_link *link,
        struct dc_crtc_timing *crtc_timing);
index a2c4db2cebdd6c742a91e20d77869c5cd94b7ee1..82349354332548e160494c23bee15acaa18b7630 100644 (file)
@@ -166,6 +166,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
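+       /* disconnect all pipe segments from the OPTC (0xf selects no source) */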
+       REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+                       OPTC_SEG0_SRC_SEL, 0xf,
+                       OPTC_SEG1_SRC_SEL, 0xf,
+                       OPTC_SEG2_SRC_SEL, 0xf,
+                       OPTC_SEG3_SRC_SEL, 0xf,
+                       OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+       REG_UPDATE(OPTC_MEMORY_CONFIG,
+                       OPTC_MEM_SEL, 0);
+
        /* disable otg request until end of the first line
         * in the vertical blank region
         */
@@ -198,6 +208,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
+       REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+                       OPTC_SEG0_SRC_SEL, 0xf,
+                       OPTC_SEG1_SRC_SEL, 0xf,
+                       OPTC_SEG2_SRC_SEL, 0xf,
+                       OPTC_SEG3_SRC_SEL, 0xf,
+                       OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
        REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
 }
 
index a4a39f1638cf26cecd82590c23725d70112d4d38..5b154750885030e171a483d30e014aa8f4bff8a1 100644 (file)
@@ -138,6 +138,16 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
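+       /* detach pipe segments and release OPTC memory before disabling the CRTC */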
+       REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+                       OPTC_SEG0_SRC_SEL, 0xf,
+                       OPTC_SEG1_SRC_SEL, 0xf,
+                       OPTC_SEG2_SRC_SEL, 0xf,
+                       OPTC_SEG3_SRC_SEL, 0xf,
+                       OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+       REG_UPDATE(OPTC_MEMORY_CONFIG,
+                       OPTC_MEM_SEL, 0);
+
        /* disable otg request until end of the first line
         * in the vertical blank region
         */
index f6cbcc9b4006952aabb3a08504d6b7e0ef1e457f..c4d71e7f18af47ba47dbc89e1a9098a0a4eade04 100644 (file)
@@ -89,6 +89,8 @@
 #include "dcn20/dcn20_vmid.h"
 #include "dml/dcn32/dcn32_fpu.h"
 
+#include "dc_state_priv.h"
+
 #include "dml2/dml2_wrapper.h"
 
 #define DC_LOGGER_INIT(logger)
@@ -1644,7 +1646,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
                if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
                        phantom_plane = prev_phantom_plane;
                else
-                       phantom_plane = dc_create_plane_state(dc);
+                       phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state);
 
                memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
                memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
@@ -1665,9 +1667,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
                phantom_plane->clip_rect.y = 0;
                phantom_plane->clip_rect.height = phantom_stream->src.height;
 
-               phantom_plane->is_phantom = true;
-
-               dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+               dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context);
 
                curr_pipe = curr_pipe->bottom_pipe;
                prev_phantom_plane = phantom_plane;
@@ -1683,13 +1683,7 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
        struct dc_stream_state *phantom_stream = NULL;
        struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
 
-       phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
-       phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
-       phantom_stream->dpms_off = true;
-       phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
-       phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
-       ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
-       ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+       phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream);
 
        /* stream has limited viewport and small timing */
        memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -1699,81 +1693,10 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
        dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx);
        DC_FP_END();
 
-       dc_add_stream_to_ctx(dc, context, phantom_stream);
+       dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream);
        return phantom_stream;
 }
 
-void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
-       int i;
-       struct dc_plane_state *phantom_plane = NULL;
-       struct dc_stream_state *phantom_stream = NULL;
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (resource_is_pipe_type(pipe, OTG_MASTER) &&
-                               resource_is_pipe_type(pipe, DPP_PIPE) &&
-                               pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-                       phantom_plane = pipe->plane_state;
-                       phantom_stream = pipe->stream;
-
-                       dc_plane_state_retain(phantom_plane);
-                       dc_stream_retain(phantom_stream);
-               }
-       }
-}
-
-// return true if removed piped from ctx, false otherwise
-bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update)
-{
-       int i;
-       bool removed_pipe = false;
-       struct dc_plane_state *phantom_plane = NULL;
-       struct dc_stream_state *phantom_stream = NULL;
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-               // build scaling params for phantom pipes
-               if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-                       phantom_plane = pipe->plane_state;
-                       phantom_stream = pipe->stream;
-
-                       dc_rem_all_planes_for_stream(dc, pipe->stream, context);
-                       dc_remove_stream_from_ctx(dc, context, pipe->stream);
-
-                       /* Ref count is incremented on allocation and also when added to the context.
-                        * Therefore we must call release for the the phantom plane and stream once
-                        * they are removed from the ctx to finally decrement the refcount to 0 to free.
-                        */
-                       dc_plane_state_release(phantom_plane);
-                       dc_stream_release(phantom_stream);
-
-                       removed_pipe = true;
-               }
-
-               /* For non-full updates, a shallow copy of the current state
-                * is created. In this case we don't want to erase the current
-                * state (there can be 2 HIRQL threads, one in flip, and one in
-                * checkMPO) that can cause a race condition.
-                *
-                * This is just a workaround, needs a proper fix.
-                */
-               if (!fast_update) {
-                       // Clear all phantom stream info
-                       if (pipe->stream) {
-                               pipe->stream->mall_stream_config.type = SUBVP_NONE;
-                               pipe->stream->mall_stream_config.paired_stream = NULL;
-                       }
-
-                       if (pipe->plane_state) {
-                               pipe->plane_state->is_phantom = false;
-                       }
-               }
-       }
-       return removed_pipe;
-}
-
 /* TODO: Input to this function should indicate which pipe indexes (or streams)
  * require a phantom pipe / stream
  */
@@ -1798,7 +1721,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
                // We determine which phantom pipes were added by comparing with
                // the phantom stream.
                if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
-                               pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+                               dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
                        pipe->stream->use_dynamic_meta = false;
                        pipe->plane_state->flip_immediate = false;
                        if (!resource_build_scaling_params(pipe)) {
@@ -1817,7 +1740,6 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
        int vlevel = 0;
        int pipe_cnt = 0;
        display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
-       struct mall_temp_config mall_temp_config;
 
        /* To handle Freesync properly, setting FreeSync DML parameters
         * to its default state for the first stage of validation
@@ -1827,29 +1749,12 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
 
        DC_LOGGER_INIT(dc->ctx->logger);
 
-       /* For fast validation, there are situations where a shallow copy of
-        * of the dc->current_state is created for the validation. In this case
-        * we want to save and restore the mall config because we always
-        * teardown subvp at the beginning of validation (and don't attempt
-        * to add it back if it's fast validation). If we don't restore the
-        * subvp config in cases of fast validation + shallow copy of the
-        * dc->current_state, the dc->current_state will have a partially
-        * removed subvp state when we did not intend to remove it.
-        */
-       if (fast_validate) {
-               memset(&mall_temp_config, 0, sizeof(mall_temp_config));
-               dcn32_save_mall_state(dc, context, &mall_temp_config);
-       }
-
        BW_VAL_TRACE_COUNT();
 
        DC_FP_START();
        out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
        DC_FP_END();
 
-       if (fast_validate)
-               dcn32_restore_mall_state(dc, context, &mall_temp_config);
-
        if (pipe_cnt == 0)
                goto validate_out;
 
@@ -1933,7 +1838,7 @@ int dcn32_populate_dml_pipes_from_context(
                 * This is just a workaround -- needs a proper fix.
                 */
                if (!fast_validate) {
-                       switch (pipe->stream->mall_stream_config.type) {
+                       switch (dc_state_get_pipe_subvp_type(context, pipe)) {
                        case SUBVP_MAIN:
                                pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
                                subvp_in_use = true;
@@ -2037,10 +1942,6 @@ static struct resource_funcs dcn32_res_pool_funcs = {
        .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
        .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
        .add_phantom_pipes = dcn32_add_phantom_pipes,
-       .remove_phantom_pipes = dcn32_remove_phantom_pipes,
-       .retain_phantom_pipes = dcn32_retain_phantom_pipes,
-       .save_mall_state = dcn32_save_mall_state,
-       .restore_mall_state = dcn32_restore_mall_state,
        .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
 };
 
@@ -2454,16 +2355,19 @@ static bool dcn32_resource_construct(
        dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
 
        dc->dml2_options.svp_pstate.callbacks.dc = dc;
-       dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
-       dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+       dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
        dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
-       dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
-       dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
-       dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
-       dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
-       dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
-       dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+       dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+       dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+       dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
        dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+       dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+       dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+       dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
 
        dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
        dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
index 9ca799da1a56e0e38466440c14251fa25ec64ca1..0c87b0fabba7d96ff38180900e41f1438419912c 100644 (file)
@@ -91,12 +91,6 @@ bool dcn32_release_post_bldn_3dlut(
                struct dc_3dlut **lut,
                struct dc_transfer_func **shaper);
 
-bool dcn32_remove_phantom_pipes(struct dc *dc,
-               struct dc_state *context, bool fast_update);
-
-void dcn32_retain_phantom_pipes(struct dc *dc,
-               struct dc_state *context);
-
 void dcn32_add_phantom_pipes(struct dc *dc,
                struct dc_state *context,
                display_e2e_pipe_params_st *pipes,
@@ -169,15 +163,7 @@ void dcn32_determine_det_override(struct dc *dc,
 void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
        display_e2e_pipe_params_st *pipes);
 
-void dcn32_save_mall_state(struct dc *dc,
-               struct dc_state *context,
-               struct mall_temp_config *temp_config);
-
-void dcn32_restore_mall_state(struct dc *dc,
-               struct dc_state *context,
-               struct mall_temp_config *temp_config);
-
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context);
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
 
 bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
 
index 12986fe0b2892669e8757a70bd8c87b3500ee1e1..74412e5f03fefbaa9350982ac92bc528cc8e80e8 100644 (file)
@@ -92,6 +92,8 @@
 #include "vm_helper.h"
 #include "dcn20/dcn20_vmid.h"
 
+#include "dc_state_priv.h"
+
 #define DC_LOGGER_INIT(logger)
 
 enum dcn321_clk_src_array_id {
@@ -1605,10 +1607,6 @@ static struct resource_funcs dcn321_res_pool_funcs = {
        .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
        .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
        .add_phantom_pipes = dcn32_add_phantom_pipes,
-       .remove_phantom_pipes = dcn32_remove_phantom_pipes,
-       .retain_phantom_pipes = dcn32_retain_phantom_pipes,
-       .save_mall_state = dcn32_save_mall_state,
-       .restore_mall_state = dcn32_restore_mall_state,
        .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
 };
 
@@ -2008,16 +2006,19 @@ static bool dcn321_resource_construct(
        dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
 
        dc->dml2_options.svp_pstate.callbacks.dc = dc;
-       dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
-       dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+       dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
        dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
-       dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
-       dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
-       dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
-       dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
-       dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
-       dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+       dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+       dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+       dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+       dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
        dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+       dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+       dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+       dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
 
        dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
        dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
index 4e1db842b98cc1197e4be4f089b2babd220e821c..761ec989187568730fdd8cd51cd1802fa657be9c 100644 (file)
 #include "reg_helper.h"
 #include "dce/dmub_abm.h"
 #include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
 #include "dml/dcn31/display_mode_vba_31.h" /*temp*/
 #include "vm_helper.h"
 #include "dcn20/dcn20_vmid.h"
 
+#include "dc_state_priv.h"
+
 #include "link_enc_cfg.h"
 #define DC_LOGGER_INIT(logger)
 
@@ -766,7 +769,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .enable_hpo_pg_support = false,
        .enable_legacy_fast_update = true,
        .enable_single_display_2to1_odm_policy = false,
-       .disable_idle_power_optimizations = true,
+       .disable_idle_power_optimizations = false,
        .dmcub_emulation = false,
        .disable_boot_optimizations = false,
        .disable_unbounded_requesting = false,
@@ -778,13 +781,15 @@ static const struct dc_debug_options debug_defaults_drv = {
        .ignore_pg = true,
        .psp_disabled_wa = true,
        .ips2_eval_delay_us = 200,
-       .ips2_entry_delay_us = 400
+       .ips2_entry_delay_us = 400,
+       .static_screen_wait_frames = 2,
 };
 
 static const struct dc_panel_config panel_config_defaults = {
        .psr = {
                .disable_psr = false,
                .disallow_psrsu = false,
+               .disallow_replay = false,
        },
        .ilr = {
                .optimize_edp_link_rate = true,
@@ -1543,6 +1548,9 @@ static void dcn35_resource_destruct(struct dcn35_resource_pool *pool)
        if (pool->base.psr != NULL)
                dmub_psr_destroy(&pool->base.psr);
 
+       if (pool->base.replay != NULL)
+               dmub_replay_destroy(&pool->base.replay);
+
        if (pool->base.pg_cntl != NULL)
                dcn_pg_cntl_destroy(&pool->base.pg_cntl);
 
@@ -2027,6 +2035,14 @@ static bool dcn35_resource_construct(
                goto create_fail;
        }
 
+       /* Replay */
+       pool->base.replay = dmub_replay_create(ctx);
+       if (pool->base.replay == NULL) {
+               dm_error("DC: failed to create replay obj!\n");
+               BREAK_TO_DEBUGGER();
+               goto create_fail;
+       }
+
        /* ABM */
        for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
                pool->base.multiple_abms[i] = dmub_abm_create(ctx,
index d1a4ed6f5916ebe2df689faa7337211e42c2359b..c78c9224ab6060493a454683423c9d2a3b27e9a1 100644 (file)
@@ -86,6 +86,7 @@ enum dmub_status {
        DMUB_STATUS_TIMEOUT,
        DMUB_STATUS_INVALID,
        DMUB_STATUS_HW_FAILURE,
+       DMUB_STATUS_POWER_STATE_D3
 };
 
 /* enum dmub_asic - dmub asic identifier */
index 3c092064c72eca4aad2b97403c47ca29c68ba757..c64b6c848ef7219e3ddc44da8d4e56763a9bf7f4 100644 (file)
@@ -653,7 +653,7 @@ union dmub_fw_boot_options {
                uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
                uint32_t usb4_cm_version: 1; /**< 1 CM support */
                uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
-               uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
+               uint32_t reserved0: 1;
                uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
                uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
                uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
index 53ac1c66dd86ef8d1b8376973669e7e35cc2ad3e..9ad738805320deeba210f6c103459617e553768f 100644 (file)
@@ -768,7 +768,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
                return DMUB_STATUS_INVALID;
 
        if (dmub->power_state != DMUB_POWER_STATE_D0)
-               return DMUB_STATUS_INVALID;
+               return DMUB_STATUS_POWER_STATE_D3;
 
        if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
            dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
@@ -789,7 +789,7 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
                return DMUB_STATUS_INVALID;
 
        if (dmub->power_state != DMUB_POWER_STATE_D0)
-               return DMUB_STATUS_INVALID;
+               return DMUB_STATUS_POWER_STATE_D3;
 
        /**
         * Read back all the queued commands to ensure that they've
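With a dedicated DMUB_STATUS_POWER_STATE_D3 return, callers can now tell "firmware is asleep" apart from a malformed request. A sketch of the caller-side split; the wake-and-retry policy here is illustrative, not taken from the driver:

	/* status obtained from dmub_srv_cmd_queue() or dmub_srv_cmd_execute() */
	switch (status) {
	case DMUB_STATUS_OK:
		break;
	case DMUB_STATUS_POWER_STATE_D3:
		/* Illustrative: transition DMUB back to D0, then requeue. */
		break;
	default:
		/* DMUB_STATUS_INVALID and friends remain hard failures. */
		break;
	}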
index 66a54da0641ce11feb10e1d777395f9bcd85f658..915a031a43cb286fdb03f2fb2788d0fa9e539b59 100644 (file)
@@ -64,7 +64,7 @@ enum audio_dto_source {
 /* PLL information required for AZALIA DTO calculation */
 
 struct audio_pll_info {
-       uint32_t dp_dto_source_clock_in_khz;
+       uint32_t audio_dto_source_clock_in_khz;
        uint32_t feed_back_divider;
        enum audio_dto_source dto_source;
        bool ss_enabled;
index 84da1dd34efd18e90b1112c2b5f9de4cd5755a10..d4cf7ead1d877e7bfd1a75fa651c81e6062cefc7 100644 (file)
@@ -69,18 +69,6 @@ static const struct fixed31_32 dc_fixpt_epsilon = { 1LL };
 static const struct fixed31_32 dc_fixpt_half = { 0x80000000LL };
 static const struct fixed31_32 dc_fixpt_one = { 0x100000000LL };
 
-static inline struct fixed31_32 dc_fixpt_from_s3132(__u64 x)
-{
-       struct fixed31_32 val;
-
-       /* If negative, convert to 2's complement. */
-       if (x & (1ULL << 63))
-               x = -(x & ~(1ULL << 63));
-
-       val.value = x;
-       return val;
-}
-
 /*
  * @brief
  * Initialization routines
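The helper removed above converted a userspace S31.32 value (sign-and-magnitude, bit 63 as the sign) into the driver's two's-complement fixed31_32. A worked example of the sign step, with an illustrative value:

	/* x = (1ULL << 63) | 0x180000000 encodes -1.5 in S31.32:
	 * clearing the sign bit leaves the magnitude 0x180000000 (1.5 in
	 * Q32), and negating yields the two's-complement -1.5.
	 */
	if (x & (1ULL << 63))
		x = -(x & ~(1ULL << 63));
	val.value = x;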
index bc96d02113608027ba21bd1234e2f270b11549c0..813463ffe15c52febad7e0cb8834b399968097ce 100644 (file)
@@ -417,6 +417,8 @@ struct integrated_info {
        /* V2.1 */
        struct edp_info edp1_info;
        struct edp_info edp2_info;
+       uint32_t gpuclk_ss_percentage;
+       uint32_t gpuclk_ss_type;
 };
 
 /*
index 47296d155c3a5ab8ef0e376dc7d1c2452477d2cd..3955b7e4b2e2e41b751717fa0dd5ed7e4730b8dd 100644 (file)
@@ -81,6 +81,7 @@ fail_alloc_context:
 void mod_freesync_destroy(struct mod_freesync *mod_freesync)
 {
        struct core_freesync *core_freesync = NULL;
+
        if (mod_freesync == NULL)
                return;
        core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
@@ -278,9 +279,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
                }
        } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
                /* Enter Below the Range */
-               if (!in_out_vrr->btr.btr_active) {
+               if (!in_out_vrr->btr.btr_active)
                        in_out_vrr->btr.btr_active = true;
-               }
        }
 
        /* BTR set to "not active" so disengage */
index 1ddb4f5eac8e538808bd8bf82b8ca2408a22dc45..182e7532dda8a177748c61abb984cbb36977eda3 100644 (file)
@@ -63,6 +63,7 @@ static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
 static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
 {
        enum mod_hdcp_status status;
+
        if (is_dp_hdcp(hdcp)) {
                status = (hdcp->auth.msg.hdcp1.bstatus &
                                DP_BSTATUS_R0_PRIME_READY) ?
@@ -131,9 +132,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
        /* Avoid device count == 0 to do authentication */
-       if (0 == get_device_count(hdcp)) {
+       if (get_device_count(hdcp) == 0)
                return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
-       }
 
        /* Some MST display may choose to report the internal panel as an HDCP RX.
         * To update this condition with 1(because the immediate repeater's internal
index 91c22b96ebde7fc5446e78f2557ac3bb33b56020..733f22bed021987e5e2efa5422bd5f6ff57725ee 100644 (file)
@@ -208,9 +208,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
        /* Avoid device count == 0 to do authentication */
-       if (0 == get_device_count(hdcp)) {
+       if (get_device_count(hdcp) == 0)
                return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
-       }
 
        /* Some MST display may choose to report the internal panel as an HDCP RX.   */
        /* To update this condition with 1(because the immediate repeater's internal */
@@ -689,9 +688,8 @@ static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp,
        if (is_hdmi_dvi_sl_hdcp(hdcp)) {
                if (!process_rxstatus(hdcp, event_ctx, input, &status))
                        goto out;
-               if (event_ctx->rx_id_list_ready) {
+               if (event_ctx->rx_id_list_ready)
                        goto out;
-               }
        }
        if (is_hdmi_dvi_sl_hdcp(hdcp))
                if (!mod_hdcp_execute_and_set(check_stream_ready_available,
index c62df3bcc7cb3c45952c3510377355ae003e00ff..1d83c1b9da10461b3f44b06608ceb1f07638669f 100644 (file)
 #define HDCP_CPIRQ_TRACE(hdcp) \
                HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
 #define HDCP_EVENT_TRACE(hdcp, event) \
-               if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
-                       HDCP_TIMEOUT_TRACE(hdcp); \
-               else if (event == MOD_HDCP_EVENT_CPIRQ) \
-                       HDCP_CPIRQ_TRACE(hdcp)
+               do { \
+                       if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+                               HDCP_TIMEOUT_TRACE(hdcp); \
+                       else if (event == MOD_HDCP_EVENT_CPIRQ) \
+                               HDCP_CPIRQ_TRACE(hdcp); \
+               } while (0)
 /* TODO: find some way to tell if logging is off to save time */
 #define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
                mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
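The do { ... } while (0) wrapper above makes the multi-statement HDCP_EVENT_TRACE expand as a single statement. Without it, the macro's internal else can capture an enclosing if's else branch. A minimal illustration with a hypothetical caller:

	/* With the old definition, this 'else' silently bound to the
	 * macro's internal 'else if', not to 'if (tracing_enabled)':
	 */
	if (tracing_enabled)
		HDCP_EVENT_TRACE(hdcp, event);
	else
		handle_tracing_disabled();	/* hypothetical helper */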
index ee67a35c2a8eddab3d27f4403054032aa93c187b..8c137d7c032e1ffa5d4e2ac288424ba4edba3008 100644 (file)
@@ -443,7 +443,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
 
                if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
-                               continue;
+                       continue;
 
                memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -926,7 +926,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
                if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
-                               continue;
+                       continue;
 
                hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
                hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
index 5b71bc96b98c50a5dfc80a932e2823570c930802..7844ea91650bf397062a166d7feee700b1fece81 100644 (file)
@@ -98,9 +98,9 @@ enum ta_dtm_encoder_type {
  * This enum defines software value for dio_output_type
  */
 typedef enum {
-    TA_DTM_DIO_OUTPUT_TYPE__INVALID,
-    TA_DTM_DIO_OUTPUT_TYPE__DIRECT,
-    TA_DTM_DIO_OUTPUT_TYPE__DPIA
+       TA_DTM_DIO_OUTPUT_TYPE__INVALID,
+       TA_DTM_DIO_OUTPUT_TYPE__DIRECT,
+       TA_DTM_DIO_OUTPUT_TYPE__DPIA
 } ta_dtm_dio_output_type;
 
 struct ta_dtm_topology_update_input_v3 {
@@ -237,11 +237,11 @@ enum ta_hdcp2_hdcp2_msg_id_max_size {
 #define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
 #define TA_HDCP__HDCP1_V_PRIME_SIZE 20
 #define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE                                                                                 \
-       TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6
+       (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6)
 
 // 64 bits boundaries
 #define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE                                                                                 \
-       TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4
+       (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4)
 
 enum ta_hdcp_status {
        TA_HDCP_STATUS__SUCCESS = 0x00,
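Parenthesizing the buffer-size macros guards their expansion against operator precedence at the use site. For instance, with a hypothetical doubled allocation:

	/* Old expansion:  a + b + 6 * 2   ==  a + b + 12   (wrong)
	 * New expansion: (a + b + 6) * 2                   (intended)
	 */
	size_t two_tx_bufs = TA_HDCP__HDCP2_TX_BUF_MAX_SIZE * 2;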
index afe1f6cce5289d74d9b5226a407a052ebfff29fc..cc3dc9b589f683bc747fcf08d5d151406f12370a 100644 (file)
@@ -1,31 +1,3 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-
-
-
 /*
  * Copyright 2016 Advanced Micro Devices, Inc.
  *
index 84f9b412a4f1172d17502395d0f418bae8233d1a..738ee763f24a516153bf94ba5beaabec1f78fd9f 100644 (file)
@@ -147,12 +147,15 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
        }
 
        /* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
-       if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
-               vsc_packet_revision = vsc_packet_rev4;
-       else if (stream->link->replay_settings.config.replay_supported)
+       if (stream->link->psr_settings.psr_feature_enabled) {
+               if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+                       vsc_packet_revision = vsc_packet_rev4;
+               else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+                       vsc_packet_revision = vsc_packet_rev2;
+       }
+
+       if (stream->link->replay_settings.config.replay_supported)
                vsc_packet_revision = vsc_packet_rev4;
-       else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
-               vsc_packet_revision = vsc_packet_rev2;
 
        /* Update to revision 5 for extended colorimetry support */
        if (stream->use_vsc_sdp_for_colorimetry)
index 1675314a3ff20856519666689fb56d36a27c60b8..ad98e504c00de5908ca94a38392ef818e91b2152 100644 (file)
@@ -31,7 +31,7 @@
 
 #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
 #define bswap16_based_on_endian(big_endian, value) \
-       (big_endian) ? cpu_to_be16(value) : cpu_to_le16(value)
+       ((big_endian) ? cpu_to_be16(value) : cpu_to_le16(value))
 
 /* Possible Min Reduction config from least aggressive to most aggressive
  *  0    1     2     3     4     5     6     7     8     9     10    11   12
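The bswap16_based_on_endian fix is the same precedence hazard: a bare conditional expression binds looser than almost everything else, so composing the old macro into a larger expression re-associated it. A small hypothetical use:

	/* Old: (be) ? cpu_to_be16(v) : (cpu_to_le16(v) | 0x1)
	 *      -- the '| 0x1' folded into the else arm only.
	 * New: the whole conversion is grouped before '| 0x1'.
	 */
	u16 word = bswap16_based_on_endian(is_be, v) | 0x1;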
@@ -973,6 +973,34 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
        return true;
 }
 
+void set_replay_coasting_vtotal(struct dc_link *link,
+       enum replay_coasting_vtotal_type type,
+       uint16_t vtotal)
+{
+       link->replay_settings.coasting_vtotal_table[type] = vtotal;
+}
+
+void calculate_replay_link_off_frame_count(struct dc_link *link,
+       uint16_t vtotal, uint16_t htotal)
+{
+       uint8_t max_link_off_frame_count = 0;
+       uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0;
+
+       max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
+       pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
+
+       if (htotal != 0 && vtotal != 0)
+               max_link_off_frame_count = htotal * max_deviation_line / (pixel_deviation_per_line * vtotal);
+       else
+               ASSERT(0);
+
+       link->replay_settings.link_off_frame_count_level =
+               max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_BEST ? PR_LINK_OFF_FRAME_COUNT_BEST :
+               max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_GOOD ? PR_LINK_OFF_FRAME_COUNT_GOOD :
+               PR_LINK_OFF_FRAME_COUNT_FAIL;
+
+}
+
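The new helper computes how many frames the link may stay off as htotal * max_deviation_line / (pixel_deviation_per_line * vtotal), then buckets the result into BEST/GOOD/FAIL levels. A worked call with made-up panel caps (not real DPCD data):

	/* Hypothetical caps: max_deviation_line = 100,
	 * pixel_deviation_per_line = 24, htotal = 2200, vtotal = 1125:
	 *   2200 * 100 / (24 * 1125) = 220000 / 27000 ~= 8 frames off,
	 * which the helper then maps to a link_off_frame_count_level.
	 */
	calculate_replay_link_off_frame_count(link, 1125, 2200);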
 bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
 {
        unsigned int data_points_size;
index d9e0d67d67f703b23f0b124779082e05a53b43db..c17bbc6fb38cafb518777b16c96a99b2116c36eb 100644 (file)
@@ -54,6 +54,11 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
                unsigned int inst);
 
 void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
+void set_replay_coasting_vtotal(struct dc_link *link,
+       enum replay_coasting_vtotal_type type,
+       uint16_t vtotal);
+void calculate_replay_link_off_frame_count(struct dc_link *link,
+       uint16_t vtotal, uint16_t htotal);
 
 bool is_psr_su_specific_panel(struct dc_link *link);
 void mod_power_calc_psr_configs(struct psr_config *psr_config,
index bf7f258c324a1c52047f490bf3de8b42e8b87ec4..1dc5dd9b7bf70b10641a76e4c731e3e735aeaeef 100644 (file)
@@ -244,7 +244,6 @@ enum DC_FEATURE_MASK {
        DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
        DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
        DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
-       DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
 };
 
 enum DC_DEBUG_MASK {
@@ -255,9 +254,10 @@ enum DC_DEBUG_MASK {
        DC_DISABLE_PSR = 0x10,
        DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
        DC_DISABLE_MPO = 0x40,
-       DC_DISABLE_REPLAY = 0x50,
        DC_ENABLE_DPIA_TRACE = 0x80,
        DC_ENABLE_DML2 = 0x100,
+       DC_DISABLE_PSR_SU = 0x200,
+       DC_DISABLE_REPLAY = 0x400,
 };
 
 enum amd_dpm_forced_level;
index 7ee3d291120d5429d879745c2e63af38cd79f371..6f80bfa7e41ac9c1bdd2faaba4c298cc3f4f9d34 100644 (file)
 #define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX                                                             2
 #define regBIF_BX1_BX_RESET_CNTL                                                                        0x00f0
 #define regBIF_BX1_BX_RESET_CNTL_BASE_IDX                                                               2
-#define regBIF_BX1_INTERRUPT_CNTL                                                                       0x8e11
-#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX                                                              5
-#define regBIF_BX1_INTERRUPT_CNTL2                                                                      0x8e12
-#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX                                                             5
+#define regBIF_BX1_INTERRUPT_CNTL                                                                       0x00f1
+#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX                                                              2
+#define regBIF_BX1_INTERRUPT_CNTL2                                                                      0x00f2
+#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX                                                             2
 #define regBIF_BX1_CLKREQB_PAD_CNTL                                                                     0x00f8
 #define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX                                                            2
 #define regBIF_BX1_BIF_FEATURES_CONTROL_MISC                                                            0x00fb
index 36a5ad8c00c59a4ec0348670dd17194eb88cabe9..edcb85560cede5b42989eb7337e04c9df1fb7233 100644 (file)
@@ -318,6 +318,7 @@ enum pp_xgmi_plpd_mode {
 #define MAX_GFX_CLKS 8
 #define MAX_CLKS 4
 #define NUM_VCN 4
+#define NUM_JPEG_ENG 32
 
 struct seq_file;
 enum amd_pp_clock_type;
@@ -775,6 +776,85 @@ struct gpu_metrics_v1_4 {
        uint16_t                        padding;
 };
 
+struct gpu_metrics_v1_5 {
+       struct metrics_table_header     common_header;
+
+       /* Temperature (Celsius) */
+       uint16_t                        temperature_hotspot;
+       uint16_t                        temperature_mem;
+       uint16_t                        temperature_vrsoc;
+
+       /* Power (Watts) */
+       uint16_t                        curr_socket_power;
+
+       /* Utilization (%) */
+       uint16_t                        average_gfx_activity;
+       uint16_t                        average_umc_activity; // memory controller
+       uint16_t                        vcn_activity[NUM_VCN];
+       uint16_t                        jpeg_activity[NUM_JPEG_ENG];
+
+       /* Energy (15.259uJ (2^-16) units) */
+       uint64_t                        energy_accumulator;
+
+       /* Driver attached timestamp (in ns) */
+       uint64_t                        system_clock_counter;
+
+       /* Throttle status */
+       uint32_t                        throttle_status;
+
+       /* Clock Lock Status. Each bit corresponds to clock instance */
+       uint32_t                        gfxclk_lock_status;
+
+       /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+       uint16_t                        pcie_link_width;
+       uint16_t                        pcie_link_speed;
+
+       /* XGMI bus width and bitrate (in Gbps) */
+       uint16_t                        xgmi_link_width;
+       uint16_t                        xgmi_link_speed;
+
+       /* Utilization Accumulated (%) */
+       uint32_t                        gfx_activity_acc;
+       uint32_t                        mem_activity_acc;
+
+       /* PCIE accumulated bandwidth (GB/sec) */
+       uint64_t                        pcie_bandwidth_acc;
+
+       /* PCIE instantaneous bandwidth (GB/sec) */
+       uint64_t                        pcie_bandwidth_inst;
+
+       /* PCIE L0 to recovery state transition accumulated count */
+       uint64_t                        pcie_l0_to_recov_count_acc;
+
+       /* PCIE replay accumulated count */
+       uint64_t                        pcie_replay_count_acc;
+
+       /* PCIE replay rollover accumulated count */
+       uint64_t                        pcie_replay_rover_count_acc;
+
+       /* PCIE NAK sent accumulated count */
+       uint32_t                        pcie_nak_sent_count_acc;
+
+       /* PCIE NAK received accumulated count */
+       uint32_t                        pcie_nak_rcvd_count_acc;
+
+       /* XGMI accumulated data transfer size (KiloBytes) */
+       uint64_t                        xgmi_read_data_acc[NUM_XGMI_LINKS];
+       uint64_t                        xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+       /* PMFW attached timestamp (10ns resolution) */
+       uint64_t                        firmware_timestamp;
+
+       /* Current clocks (Mhz) */
+       uint16_t                        current_gfxclk[MAX_GFX_CLKS];
+       uint16_t                        current_socclk[MAX_CLKS];
+       uint16_t                        current_vclk0[MAX_CLKS];
+       uint16_t                        current_dclk0[MAX_CLKS];
+       uint16_t                        current_uclk;
+
+       uint16_t                        padding;
+};
+
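Per the field comment, one energy_accumulator LSB is 2^-16 J (~15.259 uJ), so a consumer converts to joules with a shift. A sketch, assuming a populated gpu_metrics_v1_5:

	/* 1 LSB = 2^-16 J, so whole joules are a 16-bit right shift;
	 * the low 16 bits are the sub-joule remainder in 2^-16 J steps.
	 */
	uint64_t joules = gpu_metrics->energy_accumulator >> 16;
	uint64_t uj_rem = (gpu_metrics->energy_accumulator & 0xffff)
				* 15259 / 1000;	/* approx. microjoules */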
 /*
  * gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
  * Use gpu_metrics_v2_1 or later instead.
index 2cd995b0cebacb12ad12cdb857e609e746a19c7b..087d57850304c45193a7f5de336953c1dec9cbba 100644 (file)
@@ -2168,7 +2168,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                if (amdgpu_dpm_is_overdrive_supported(adev))
                        *states = ATTR_STATE_SUPPORTED;
        } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
-               if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
+               if ((adev->flags & AMD_IS_APU &&
+                    gc_ver != IP_VERSION(9, 4, 3)) ||
+                   gc_ver == IP_VERSION(9, 0, 1))
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pcie_bw)) {
                /* PCIe Perf counters won't work on APU nodes */
@@ -4347,11 +4349,19 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
                seq_printf(m, "\t%u mV (VDDNB)\n", value);
        size = sizeof(uint32_t);
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
-               seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff);
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
+               if (adev->flags & AMD_IS_APU)
+                       seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
+               else
+                       seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
+       }
        size = sizeof(uint32_t);
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
-               seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff);
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
+               if (adev->flags & AMD_IS_APU)
+                       seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
+               else
+                       seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
+       }
        size = sizeof(value);
        seq_printf(m, "\n");
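The "%u.%02u W" prints above split an 8.8 fixed-point sensor value: the high bits are whole watts and the low byte counts 1/256 W steps (assuming the commonly documented encoding of these power sensors). An exact hundredths conversion would look like:

	uint32_t watts      = query >> 8;			/* integer watts */
	uint32_t hundredths = (query & 0xff) * 100 / 256;	/* true 0.01 W  */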
 
@@ -4377,9 +4387,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
                /* VCN clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
                        if (!value) {
-                               seq_printf(m, "VCN: Disabled\n");
+                               seq_printf(m, "VCN: Powered down\n");
                        } else {
-                               seq_printf(m, "VCN: Enabled\n");
+                               seq_printf(m, "VCN: Powered up\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4391,9 +4401,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
                /* UVD clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
                        if (!value) {
-                               seq_printf(m, "UVD: Disabled\n");
+                               seq_printf(m, "UVD: Powered down\n");
                        } else {
-                               seq_printf(m, "UVD: Enabled\n");
+                               seq_printf(m, "UVD: Powered up\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4405,9 +4415,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
                /* VCE clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
                        if (!value) {
-                               seq_printf(m, "VCE: Disabled\n");
+                               seq_printf(m, "VCE: Powered down\n");
                        } else {
-                               seq_printf(m, "VCE: Enabled\n");
+                               seq_printf(m, "VCE: Powered up\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
                        }
index f2a55c1413f597a4d643d1a2c99367517bdff17e..17882f8dfdd34f92d5d37a9b0ee37f4a7d1bb406 100644 (file)
@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
                struct pp_hwmgr *hwmgr,
                ATOM_Tonga_PPM_Table *atom_ppm_table)
 {
-       struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
+       struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
        struct phm_ppt_v1_information *pp_table_information =
                (struct phm_ppt_v1_information *)(hwmgr->pptable);
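The kzalloc fix above switches to the sizeof(*ptr) idiom so the allocation always matches the variable's own type; previously the size of an unrelated struct (ATOM_Tonga_PPM_Table) was used for a struct phm_ppm_table allocation. A generic sketch of the idiom:

	struct phm_ppm_table *ptr;

	/* sizeof(*ptr) tracks the declared type automatically, so the
	 * allocation can never drift out of sync with the pointer type.
	 */
	ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;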
 
index 11372fcc59c8fe8daa93c750b779f8a9d0bbe3bf..aa91730e4eaffdf7760c844a7722aa1dedcb42d9 100644 (file)
@@ -2974,6 +2974,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                result = smu7_get_evv_voltages(hwmgr);
                if (result) {
                        pr_info("Get EVV Voltage Failed.  Abort Driver loading!\n");
+                       kfree(hwmgr->backend);
+                       hwmgr->backend = NULL;
                        return -EINVAL;
                }
        } else {
@@ -3019,8 +3021,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        }
 
        result = smu7_update_edc_leakage_table(hwmgr);
-       if (result)
+       if (result) {
+               smu7_hwmgr_backend_fini(hwmgr);
                return result;
+       }
 
        return 0;
 }
@@ -3995,6 +3999,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        uint32_t sclk, mclk, activity_percent;
        uint32_t offset, val_vid;
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct amdgpu_device *adev = hwmgr->adev;
 
        /* size must be at least 4 bytes for all sensors */
        if (*size < 4)
@@ -4038,7 +4043,21 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
-               return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+               if ((adev->asic_type != CHIP_HAWAII) &&
+                   (adev->asic_type != CHIP_BONAIRE) &&
+                   (adev->asic_type != CHIP_FIJI) &&
+                   (adev->asic_type != CHIP_TONGA))
+                       return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+               else
+                       return -EOPNOTSUPP;
+       case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+               if ((adev->asic_type != CHIP_HAWAII) &&
+                   (adev->asic_type != CHIP_BONAIRE) &&
+                   (adev->asic_type != CHIP_FIJI) &&
+                   (adev->asic_type != CHIP_TONGA))
+                       return -EOPNOTSUPP;
+               else
+                       return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
        case AMDGPU_PP_SENSOR_VDDGFX:
                if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
                    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
index 9e4228232f024e53cf61dcc4027bbe0d93a7153c..ad1fd3150d03eda5aad4beb8f674829526842b6d 100644 (file)
@@ -2298,6 +2298,7 @@ static uint32_t ci_get_mac_definition(uint32_t value)
        case SMU_MAX_ENTRIES_SMIO:
                return SMU7_MAX_ENTRIES_SMIO;
        case SMU_MAX_LEVELS_VDDC:
+       case SMU_MAX_LEVELS_VDDGFX:
                return SMU7_MAX_LEVELS_VDDC;
        case SMU_MAX_LEVELS_VDDCI:
                return SMU7_MAX_LEVELS_VDDCI;
index 97d9802fe6731fcc4c9e1b35e10329e28fa7194e..17d2f5bff4a7e346810dded2c842be0ee8e79fcc 100644 (file)
@@ -2263,6 +2263,7 @@ static uint32_t iceland_get_mac_definition(uint32_t value)
        case SMU_MAX_ENTRIES_SMIO:
                return SMU71_MAX_ENTRIES_SMIO;
        case SMU_MAX_LEVELS_VDDC:
+       case SMU_MAX_LEVELS_VDDGFX:
                return SMU71_MAX_LEVELS_VDDC;
        case SMU_MAX_LEVELS_VDDCI:
                return SMU71_MAX_LEVELS_VDDCI;
index fef2d290f3f2526b2a196649e7e74a0df154b6ec..7b812b9994d7cee23eb8785f7982f69de078cea5 100644 (file)
@@ -123,7 +123,7 @@ typedef enum {
   VOLTAGE_GUARDBAND_COUNT
 } GFX_GUARDBAND_e;
 
-#define SMU_METRICS_TABLE_VERSION 0x9
+#define SMU_METRICS_TABLE_VERSION 0xB
 
 typedef struct __attribute__((packed, aligned(4))) {
   uint32_t AccumulationCounter;
@@ -219,7 +219,103 @@ typedef struct __attribute__((packed, aligned(4))) {
   uint32_t PCIenReplayARolloverCountAcc;  // The Pcie counter itself is accumulated
   uint32_t PCIeNAKSentCountAcc;           // The Pcie counter itself is accumulated
   uint32_t PCIeNAKReceivedCountAcc;       // The Pcie counter itself is accumulated
-} MetricsTable_t;
+
+  // VCN/JPEG ACTIVITY
+  uint32_t VcnBusy[4];
+  uint32_t JpegBusy[32];
+} MetricsTableX_t;
+
+typedef struct __attribute__((packed, aligned(4))) {
+  uint32_t AccumulationCounter;
+
+  //TEMPERATURE
+  uint32_t MaxSocketTemperature;
+  uint32_t MaxVrTemperature;
+  uint32_t MaxHbmTemperature;
+  uint64_t MaxSocketTemperatureAcc;
+  uint64_t MaxVrTemperatureAcc;
+  uint64_t MaxHbmTemperatureAcc;
+
+  //POWER
+  uint32_t SocketPowerLimit;
+  uint32_t MaxSocketPowerLimit;
+  uint32_t SocketPower;
+
+  //ENERGY
+  uint64_t Timestamp;
+  uint64_t SocketEnergyAcc;
+  uint64_t CcdEnergyAcc;
+  uint64_t XcdEnergyAcc;
+  uint64_t AidEnergyAcc;
+  uint64_t HbmEnergyAcc;
+
+  //FREQUENCY
+  uint32_t CclkFrequencyLimit;
+  uint32_t GfxclkFrequencyLimit;
+  uint32_t FclkFrequency;
+  uint32_t UclkFrequency;
+  uint32_t SocclkFrequency[4];
+  uint32_t VclkFrequency[4];
+  uint32_t DclkFrequency[4];
+  uint32_t LclkFrequency[4];
+  uint64_t GfxclkFrequencyAcc[8];
+  uint64_t CclkFrequencyAcc[96];
+
+  //FREQUENCY RANGE
+  uint32_t MaxCclkFrequency;
+  uint32_t MinCclkFrequency;
+  uint32_t MaxGfxclkFrequency;
+  uint32_t MinGfxclkFrequency;
+  uint32_t FclkFrequencyTable[4];
+  uint32_t UclkFrequencyTable[4];
+  uint32_t SocclkFrequencyTable[4];
+  uint32_t VclkFrequencyTable[4];
+  uint32_t DclkFrequencyTable[4];
+  uint32_t LclkFrequencyTable[4];
+  uint32_t MaxLclkDpmRange;
+  uint32_t MinLclkDpmRange;
+
+  //XGMI
+  uint32_t XgmiWidth;
+  uint32_t XgmiBitrate;
+  uint64_t XgmiReadBandwidthAcc[8];
+  uint64_t XgmiWriteBandwidthAcc[8];
+
+  //ACTIVITY
+  uint32_t SocketC0Residency;
+  uint32_t SocketGfxBusy;
+  uint32_t DramBandwidthUtilization;
+  uint64_t SocketC0ResidencyAcc;
+  uint64_t SocketGfxBusyAcc;
+  uint64_t DramBandwidthAcc;
+  uint32_t MaxDramBandwidth;
+  uint64_t DramBandwidthUtilizationAcc;
+  uint64_t PcieBandwidthAcc[4];
+
+  //THROTTLERS
+  uint32_t ProchotResidencyAcc;
+  uint32_t PptResidencyAcc;
+  uint32_t SocketThmResidencyAcc;
+  uint32_t VrThmResidencyAcc;
+  uint32_t HbmThmResidencyAcc;
+  uint32_t GfxLockXCDMak;
+
+  // New Items at end to maintain driver compatibility
+  uint32_t GfxclkFrequency[8];
+
+  //PSNs
+  uint64_t PublicSerialNumber_AID[4];
+  uint64_t PublicSerialNumber_XCD[8];
+  uint64_t PublicSerialNumber_CCD[12];
+
+  //XGMI Data transfer size
+  uint64_t XgmiReadDataSizeAcc[8]; //in KByte
+  uint64_t XgmiWriteDataSizeAcc[8]; //in KByte
+
+  // VCN/JPEG ACTIVITY
+  uint32_t VcnBusy[4];
+  uint32_t JpegBusy[32];
+} MetricsTableA_t;
 
 #define SMU_VF_METRICS_TABLE_VERSION 0x3
 
index 3998c9b31d076bfa6a1d4cc473e49a1ad83078b7..a28649f210933c03721c71e4e1fedb6935d00130 100644 (file)
@@ -248,6 +248,8 @@ struct PPTable_t {
 #define SMUQ10_TO_UINT(x) ((x) >> 10)
 #define SMUQ10_FRAC(x) ((x) & 0x3ff)
 #define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+               (metrics_a->field) : (metrics_x->field))
 
 struct smu_v13_0_6_dpm_map {
        enum smu_clk_type clk_type;
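GET_METRIC_FIELD lets one code path read whichever of the two firmware layouts (MetricsTableX_t on dGPU, MetricsTableA_t on APU) filled the shared buffer; the SMUQ10_* macros above then unpack the unsigned Q10 fixed-point values (value / 1024, rounded). A minimal sketch of the dispatch pattern, with simplified stand-in structs rather than the real tables:

	/* Two layouts aliasing one buffer; a flag picks the view. */
	struct metrics_x { unsigned int power; /* dGPU-style layout */ };
	struct metrics_a { unsigned int rsvd; unsigned int power; /* APU */ };

	#define GET_FIELD(is_apu, x, a, field) \
		((is_apu) ? ((a)->field) : ((x)->field))

	static unsigned int read_power(void *buf, int is_apu)
	{
		struct metrics_x *mx = buf;
		struct metrics_a *ma = buf;

		return GET_FIELD(is_apu, mx, ma, power);
	}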
@@ -330,7 +332,8 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
                SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
                               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 
-       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+                      max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
                       PAGE_SIZE,
                       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
 
@@ -338,12 +341,13 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
                       PAGE_SIZE,
                       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
 
-       smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+       smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
+                      sizeof(MetricsTableA_t)), GFP_KERNEL);
        if (!smu_table->metrics_table)
                return -ENOMEM;
        smu_table->metrics_time = 0;
 
-       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_4);
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
        smu_table->gpu_metrics_table =
                kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
        if (!smu_table->gpu_metrics_table) {
@@ -469,9 +473,11 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
 static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+       MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+       MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
        struct PPTable_t *pptable =
                (struct PPTable_t *)smu_table->driver_pptable;
+       struct amdgpu_device *adev = smu->adev;
        int ret, i, retry = 100;
        uint32_t table_version;
 
@@ -483,7 +489,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
                                return ret;
 
                        /* Ensure that metrics have been updated */
-                       if (metrics->AccumulationCounter)
+                       if (GET_METRIC_FIELD(AccumulationCounter))
                                break;
 
                        usleep_range(1000, 1100);
@@ -500,29 +506,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
                        table_version;
 
                pptable->MaxSocketPowerLimit =
-                       SMUQ10_ROUND(metrics->MaxSocketPowerLimit);
+                       SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
                pptable->MaxGfxclkFrequency =
-                       SMUQ10_ROUND(metrics->MaxGfxclkFrequency);
+                       SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
                pptable->MinGfxclkFrequency =
-                       SMUQ10_ROUND(metrics->MinGfxclkFrequency);
+                       SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
 
                for (i = 0; i < 4; ++i) {
                        pptable->FclkFrequencyTable[i] =
-                               SMUQ10_ROUND(metrics->FclkFrequencyTable[i]);
+                               SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
                        pptable->UclkFrequencyTable[i] =
-                               SMUQ10_ROUND(metrics->UclkFrequencyTable[i]);
+                               SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
                        pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
-                               metrics->SocclkFrequencyTable[i]);
+                               GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
                        pptable->VclkFrequencyTable[i] =
-                               SMUQ10_ROUND(metrics->VclkFrequencyTable[i]);
+                               SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
                        pptable->DclkFrequencyTable[i] =
-                               SMUQ10_ROUND(metrics->DclkFrequencyTable[i]);
+                               SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
                        pptable->LclkFrequencyTable[i] =
-                               SMUQ10_ROUND(metrics->LclkFrequencyTable[i]);
+                               SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
                }
 
                /* use AID0 serial number by default */
-               pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+               pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
 
                pptable->Init = true;
        }
@@ -824,7 +830,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
                                            uint32_t *value)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+       MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+       MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        int xcc_id;
@@ -839,50 +846,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
        case METRICS_AVERAGE_GFXCLK:
                if (smu->smc_fw_version >= 0x552F00) {
                        xcc_id = GET_INST(GC, 0);
-                       *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+                       *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
                } else {
                        *value = 0;
                }
                break;
        case METRICS_CURR_SOCCLK:
        case METRICS_AVERAGE_SOCCLK:
-               *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
                break;
        case METRICS_CURR_UCLK:
        case METRICS_AVERAGE_UCLK:
-               *value = SMUQ10_ROUND(metrics->UclkFrequency);
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
                break;
        case METRICS_CURR_VCLK:
-               *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
                break;
        case METRICS_CURR_DCLK:
-               *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
                break;
        case METRICS_CURR_FCLK:
-               *value = SMUQ10_ROUND(metrics->FclkFrequency);
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
                break;
        case METRICS_AVERAGE_GFXACTIVITY:
-               *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
                break;
        case METRICS_AVERAGE_MEMACTIVITY:
-               *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
                break;
        case METRICS_CURR_SOCKETPOWER:
-               *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
                break;
        case METRICS_TEMPERATURE_HOTSPOT:
-               *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_TEMPERATURE_MEM:
-               *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        /* This is the max of all VRs and not just SOC VR.
         * No need to define another data type for the same.
         */
        case METRICS_TEMPERATURE_VRSOC:
-               *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+               *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        default:
@@ -963,7 +970,9 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
                        if (i < (clocks.num_levels - 1))
                                clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
 
-                       if (curr_clk >= clk1 && curr_clk < clk2) {
+                       if (curr_clk == clk1) {
+                               level = i;
+                       } else if (curr_clk >= clk1 && curr_clk < clk2) {
                                level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
                                                i :
                                                i + 1;
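The added exact-match branch fixes the top DPM level: there clk2 is never advanced past clk1, so the half-open range test rejected a clock sitting exactly on the highest level. A simplified, self-contained walk-through with made-up levels:

	static int pick_level(const unsigned int *clks, int n, unsigned int curr)
	{
		int level = -1, i;

		for (i = 0; i < n; i++) {
			unsigned int clk1 = clks[i];
			unsigned int clk2 = (i < n - 1) ? clks[i + 1] : clk1;

			if (curr == clk1)		/* the added exact-match case */
				level = i;
			else if (curr >= clk1 && curr < clk2)
				level = (curr - clk1) <= (clk2 - curr) ? i : i + 1;
		}
		return level;	/* {300, 600, 900}, curr 900 -> 2 (was -1) */
	}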
@@ -2067,67 +2076,70 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
 static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_4 *gpu_metrics =
-               (struct gpu_metrics_v1_4 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_5 *gpu_metrics =
+               (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
        struct amdgpu_device *adev = smu->adev;
-       int ret = 0, xcc_id, inst, i;
-       MetricsTable_t *metrics;
+       int ret = 0, xcc_id, inst, i, j;
+       MetricsTableX_t *metrics_x;
+       MetricsTableA_t *metrics_a;
        u16 link_width_level;
 
-       metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
-       ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+       metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
+       ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
        if (ret) {
-               kfree(metrics);
+               kfree(metrics_x);
                return ret;
        }
 
-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 4);
+       metrics_a = (MetricsTableA_t *)metrics_x;
+
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
 
        gpu_metrics->temperature_hotspot =
-               SMUQ10_ROUND(metrics->MaxSocketTemperature);
+               SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
        /* Individual HBM stack temperature is not reported */
        gpu_metrics->temperature_mem =
-               SMUQ10_ROUND(metrics->MaxHbmTemperature);
+               SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
        /* Reports max temperature of all voltage rails */
        gpu_metrics->temperature_vrsoc =
-               SMUQ10_ROUND(metrics->MaxVrTemperature);
+               SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
 
        gpu_metrics->average_gfx_activity =
-               SMUQ10_ROUND(metrics->SocketGfxBusy);
+               SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
        gpu_metrics->average_umc_activity =
-               SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+               SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
 
        gpu_metrics->curr_socket_power =
-               SMUQ10_ROUND(metrics->SocketPower);
+               SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
        /* Energy counter reported in 15.259uJ (2^-16) units */
-       gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+       gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
 
        for (i = 0; i < MAX_GFX_CLKS; i++) {
                xcc_id = GET_INST(GC, i);
                if (xcc_id >= 0)
                        gpu_metrics->current_gfxclk[i] =
-                               SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+                               SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
 
                if (i < MAX_CLKS) {
                        gpu_metrics->current_socclk[i] =
-                               SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+                               SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
                        inst = GET_INST(VCN, i);
                        if (inst >= 0) {
                                gpu_metrics->current_vclk0[i] =
-                                       SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+                                       SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
                                gpu_metrics->current_dclk0[i] =
-                                       SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+                                       SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
                        }
                }
        }
 
-       gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+       gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
 
        /* Throttle status is not reported through metrics now */
        gpu_metrics->throttle_status = 0;
 
        /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
-       gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+       gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
 
        if (!(adev->flags & AMD_IS_APU)) {
                link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
@@ -2139,38 +2151,57 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
                gpu_metrics->pcie_link_speed =
                        smu_v13_0_6_get_current_pcie_link_speed(smu);
                gpu_metrics->pcie_bandwidth_acc =
-                               SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+                               SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
                gpu_metrics->pcie_bandwidth_inst =
-                               SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+                               SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
                gpu_metrics->pcie_l0_to_recov_count_acc =
-                               metrics->PCIeL0ToRecoveryCountAcc;
+                               metrics_x->PCIeL0ToRecoveryCountAcc;
                gpu_metrics->pcie_replay_count_acc =
-                               metrics->PCIenReplayAAcc;
+                               metrics_x->PCIenReplayAAcc;
                gpu_metrics->pcie_replay_rover_count_acc =
-                               metrics->PCIenReplayARolloverCountAcc;
+                               metrics_x->PCIenReplayARolloverCountAcc;
+               gpu_metrics->pcie_nak_sent_count_acc =
+                               metrics_x->PCIeNAKSentCountAcc;
+               gpu_metrics->pcie_nak_rcvd_count_acc =
+                               metrics_x->PCIeNAKReceivedCountAcc;
        }
 
        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
        gpu_metrics->gfx_activity_acc =
-               SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+               SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
        gpu_metrics->mem_activity_acc =
-               SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+               SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
 
        for (i = 0; i < NUM_XGMI_LINKS; i++) {
                gpu_metrics->xgmi_read_data_acc[i] =
-                       SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+                       SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
                gpu_metrics->xgmi_write_data_acc[i] =
-                       SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+                       SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
        }
 
-       gpu_metrics->xgmi_link_width = SMUQ10_ROUND(metrics->XgmiWidth);
-       gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(metrics->XgmiBitrate);
+       for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+               inst = GET_INST(JPEG, i);
+               for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+                       gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
+                               SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
+                               [(inst * adev->jpeg.num_jpeg_rings) + j]);
+               }
+       }
 
-       gpu_metrics->firmware_timestamp = metrics->Timestamp;
+       for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+               inst = GET_INST(VCN, i);
+               gpu_metrics->vcn_activity[i] =
+                       SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
+       }
+
+       gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
+       gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
+
+       gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
 
        *table = (void *)gpu_metrics;
-       kfree(metrics);
+       kfree(metrics_x);
 
        return sizeof(*gpu_metrics);
 }
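
Every shared field in the function above goes through GET_METRIC_FIELD(), which dispatches between the APU and dGPU metrics layouts; the full patch defines it earlier in smu_v13_0_6_ppt.c, roughly as below (a reconstruction from the usage here, not quoted from the patch). Fields present in only one layout, such as the PCIe counters, are read through metrics_x directly.

    /* Dispatch on device type: APUs fill MetricsTableA_t, dGPUs fill
     * MetricsTableX_t; both views share one allocation (see the kzalloc
     * of max(sizeof(...)) above). */
    #define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ? \
                    (metrics_a->field) : (metrics_x->field))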
@@ -2206,17 +2237,18 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
                        continue;
                }
 
-               if (ret) {
-                       dev_err(adev->dev,
-                               "failed to send mode2 message \tparam: 0x%08x error code %d\n",
-                               SMU_RESET_MODE_2, ret);
+               if (ret)
                        goto out;
-               }
+
        } while (ret == -ETIME && timeout);
 
 out:
        mutex_unlock(&smu->message_lock);
 
+       if (ret)
+               dev_err(adev->dev, "failed to send mode2 reset, error code %d",
+                       ret);
+
        return ret;
 }
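
The reshuffle above reports the failure once, after the message lock is dropped; a minimal sketch of the resulting shape (hypothetical helper name; the real loop retries on -ETIME until a timeout):

    mutex_lock(&smu->message_lock);
    do {
            ret = send_mode2_reset_msg(smu);        /* hypothetical */
            if (ret == -ETIME)
                    continue;                       /* FW busy, retry */
            break;                                  /* success or hard error */
    } while (--timeout);
    mutex_unlock(&smu->message_lock);

    if (ret)        /* log outside the lock, once */
            dev_err(adev->dev, "failed to send mode2 reset, error code %d\n", ret);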
 
@@ -2524,9 +2556,9 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
                return 0;
        }
 
-       if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0))
+       if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
                *count = 1;
-       else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0))
+       else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
                *count = 1;
 
        return 0;
@@ -2864,6 +2896,13 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
        return ret;
 }
 
+static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
+                       void *table)
+{
+       /* Advertise ECC info support by default */
+       return 0;
+}
+
 static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
        /* init dpm */
        .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2918,6 +2957,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
        .i2c_init = smu_v13_0_6_i2c_control_init,
        .i2c_fini = smu_v13_0_6_i2c_control_fini,
        .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+       .get_ecc_info = smu_v13_0_6_get_ecc_info,
 };
 
 void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
index 001a5cf096579cc27a97501d2b4e803188f73b14..00cd615bbcdc0bf1b88dbb67e94c86b86173777a 100644 (file)
@@ -989,6 +989,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
        case METRICS_VERSION(1, 4):
                structure_size = sizeof(struct gpu_metrics_v1_4);
                break;
+       case METRICS_VERSION(1, 5):
+               structure_size = sizeof(struct gpu_metrics_v1_5);
+               break;
        case METRICS_VERSION(2, 0):
                structure_size = sizeof(struct gpu_metrics_v2_0);
                break;
index 3cc0ffc28e862158d16a26255efa8d87302283f8..888aadb6a4acbb727f08b8f353c59856dc595808 100644 (file)
@@ -461,6 +461,7 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
 
                INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
                INIT_LIST_HEAD(&arg.fbs);
+               drm_WARN_ON(dev, !list_empty(&fb->filp_head));
                list_add_tail(&fb->filp_head, &arg.fbs);
 
                schedule_work(&arg.work);
@@ -827,6 +828,8 @@ void drm_framebuffer_free(struct kref *kref)
                        container_of(kref, struct drm_framebuffer, base.refcount);
        struct drm_device *dev = fb->dev;
 
+       drm_WARN_ON(dev, !list_empty(&fb->filp_head));
+
        /*
         * The lookup idr holds a weak reference, which has not necessarily been
         * removed at this point. Check for that.
@@ -1119,7 +1122,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 
        dev = fb->dev;
 
-       WARN_ON(!list_empty(&fb->filp_head));
+       drm_WARN_ON(dev, !list_empty(&fb->filp_head));
 
        /*
         * drm ABI mandates that we remove any deleted framebuffers from active
index 9e8e4c60983d60fb07bdf6b314db7cfc23ac38c2..672c655c7a8e7e4bea908ddcb5d74fc7c1549674 100644 (file)
@@ -1503,6 +1503,7 @@ retry:
 out:
        if (fb)
                drm_framebuffer_put(fb);
+       fb = NULL;
        if (plane->old_fb)
                drm_framebuffer_put(plane->old_fb);
        plane->old_fb = NULL;
index 884a1da3608930e1eed49e11c4b192d0106b906e..6b25e195232f13376f9b435f04fa48c50c01abdd 100644 (file)
@@ -3067,24 +3067,29 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
 {
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
-       bool use_mplla;
+       bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
+       bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
        int i;
 
-       use_mplla = intel_c20_use_mplla(mpll_hw_state->clock);
-       if (use_mplla) {
-               for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
-                       I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
-                                       "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
-                                       crtc->base.base.id, crtc->base.name, i,
-                                       mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
-               }
-       } else {
+       I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
+                       "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+                       crtc->base.base.id, crtc->base.name,
+                       sw_use_mpllb, hw_use_mpllb);
+
+       if (hw_use_mpllb) {
                for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
                        I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
                                        "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
                                        crtc->base.base.id, crtc->base.name, i,
                                        mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
                }
+       } else {
+               for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
+                       I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+                                       "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+                                       crtc->base.base.id, crtc->base.name, i,
+                                       mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+               }
        }
 
        for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
index 5f091502719b956a7fc3037ecc473b81ba3455d9..6fd4fa52253a35a02957509d7135310346242d41 100644 (file)
@@ -405,8 +405,8 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
                                                     struct drm_i915_private,
                                                     display.power.domains);
 
-       drm_dbg(&i915->drm, "async_put_wakeref %lu\n",
-               power_domains->async_put_wakeref);
+       drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
+               str_yes_no(power_domains->async_put_wakeref));
 
        print_power_domains(power_domains, "async_put_domains[0]",
                            &power_domains->async_put_domains[0]);
index 3b2482bf683ff3c9b7dd5caa3c9a83db7ac476fe..c3b906ebe542f24151180fbd880575a1619287c1 100644 (file)
@@ -2101,7 +2101,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
                }
        }
 
-       dsc_max_bpc = intel_dp_dsc_min_src_input_bpc(i915);
+       dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
        if (!dsc_max_bpc)
                return -EINVAL;
 
index b6e2e70e129046040336012604f3955a12c3ce8f..8f702c3fc62d483e6ba92d4d02537576975441ae 100644 (file)
@@ -3319,11 +3319,11 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct dentry *root = connector->base.debugfs_entry;
 
-       if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) {
-               if (!(HAS_DP20(i915) &&
-                     connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort))
-                       return;
-       }
+       /* TODO: Add support for MST connectors as well. */
+       if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
+            connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+           connector->mst_port)
+               return;
 
        debugfs_create_file("i915_psr_sink_status", 0444, root,
                            connector, &i915_psr_sink_status_fops);
index c573c067779f58ad0187832324cdb3fcc2892bc9..03bc7f9d191b98a4df1201098f9fa5c160a3502c 100644 (file)
@@ -412,9 +412,9 @@ struct i915_gem_context {
 
        /** @stale: tracks stale engines to be destroyed */
        struct {
-               /** @lock: guards engines */
+               /** @stale.lock: guards engines */
                spinlock_t lock;
-               /** @engines: list of stale engines */
+               /** @stale.engines: list of stale engines */
                struct list_head engines;
        } stale;
 };
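
This hunk and several below apply the same kernel-doc rule: members of a nested struct must be documented with their full dotted path, otherwise kernel-doc attaches the text to the wrong (or a duplicate) name. A minimal sketch of the convention, with hypothetical names:

    /**
     * struct example - nested-member kernel-doc naming
     * @stats: bookkeeping for the example
     */
    struct example {
            struct {
                    /** @stats.lock: protects @stats.count */
                    spinlock_t lock;
                    /** @stats.count: events seen so far */
                    u64 count;
            } stats;
    };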
index 81a57dd52dfda48d31506c7295dbb1ad1868ea57..555022c0652c804d55efbfa94b2d0ca5f9f97225 100644 (file)
@@ -1159,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
 
        vaddr = unmask_page(cache->vaddr);
        if (cache->vaddr & KMAP)
-               kunmap_atomic(vaddr);
+               kunmap_local(vaddr);
        else
                io_mapping_unmap_atomic((void __iomem *)vaddr);
 }
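
This and the following hunks are one mechanical conversion: kmap_atomic()/kunmap_atomic() become kmap_local_page()/kunmap_local(). A self-contained sketch of the pattern (hypothetical helper), assuming the caller holds a reference on the page:

    #include <linux/highmem.h>

    static void copy_from_page_sketch(struct page *page, void *dst, size_t len)
    {
            /* kmap_local mappings are CPU-local and nestable, and unlike
             * kmap_atomic() they leave preemption and page faults enabled. */
            void *vaddr = kmap_local_page(page);

            memcpy(dst, vaddr, len);
            kunmap_local(vaddr);    /* unmap using the returned address */
    }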
@@ -1175,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
        if (cache->vaddr & KMAP) {
                struct page *page = i915_gem_object_get_page(obj, cache->page);
 
-               vaddr = kmap_atomic(page);
+               vaddr = kmap_local_page(page);
                cache->vaddr = unmask_flags(cache->vaddr) |
                        (unsigned long)vaddr;
        } else {
@@ -1205,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
                if (cache->vaddr & CLFLUSH_AFTER)
                        mb();
 
-               kunmap_atomic(vaddr);
+               kunmap_local(vaddr);
                i915_gem_object_finish_access(obj);
        } else {
                struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1237,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
        struct page *page;
 
        if (cache->vaddr) {
-               kunmap_atomic(unmask_page(cache->vaddr));
+               kunmap_local(unmask_page(cache->vaddr));
        } else {
                unsigned int flushes;
                int err;
@@ -1259,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
        if (!obj->mm.dirty)
                set_page_dirty(page);
 
-       vaddr = kmap_atomic(page);
+       vaddr = kmap_local_page(page);
        cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
        cache->page = pageno;
 
index 25eeeb863209eece6a3123ba45f3a1c605f4bb78..58e6c680fe0df6f7ee6223eef68cf67c5eaf0b4e 100644 (file)
@@ -500,17 +500,15 @@ static void
 i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
 {
        pgoff_t idx = offset >> PAGE_SHIFT;
-       void *src_map;
        void *src_ptr;
 
-       src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
-
-       src_ptr = src_map + offset_in_page(offset);
+       src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
+                 + offset_in_page(offset);
        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                drm_clflush_virt_range(src_ptr, size);
        memcpy(dst, src_ptr, size);
 
-       kunmap_atomic(src_map);
+       kunmap_local(src_ptr);
 }
 
 static void
index 5df128e2f4dc2082962818da89ab9d0816623057..ef85c6dc9fd592f9db1fec8e99e24974f3dc7029 100644 (file)
@@ -65,16 +65,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        dst = vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
-               void *src;
 
                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        goto err_st;
 
-               src = kmap_atomic(page);
-               memcpy(dst, src, PAGE_SIZE);
+               memcpy_from_page(dst, page, 0, PAGE_SIZE);
                drm_clflush_virt_range(dst, PAGE_SIZE);
-               kunmap_atomic(src);
 
                put_page(page);
                dst += PAGE_SIZE;
@@ -113,16 +110,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 
                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
-                       char *dst;
 
                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;
 
-                       dst = kmap_atomic(page);
                        drm_clflush_virt_range(src, PAGE_SIZE);
-                       memcpy(dst, src, PAGE_SIZE);
-                       kunmap_atomic(dst);
+                       memcpy_to_page(page, 0, src, PAGE_SIZE);
 
                        set_page_dirty(page);
                        if (obj->mm.madv == I915_MADV_WILLNEED)
index 73a4a4eb29e08689fdac30acd3a88739f3205a12..38b72d86560f0547a2c6cdcbe1ff531d56fda475 100644 (file)
@@ -485,11 +485,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
                if (err < 0)
                        return err;
 
-               vaddr = kmap_atomic(page);
+               vaddr = kmap_local_page(page);
+               pagefault_disable();
                unwritten = __copy_from_user_inatomic(vaddr + pg,
                                                      user_data,
                                                      len);
-               kunmap_atomic(vaddr);
+               pagefault_enable();
+               kunmap_local(vaddr);
 
                err = aops->write_end(obj->base.filp, mapping, offset, len,
                                      len - unwritten, page, data);
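
The explicit pagefault_disable()/pagefault_enable() pair is the one non-mechanical part of the conversion: kmap_atomic() disabled page faults as a side effect, kmap_local_page() does not, and __copy_from_user_inatomic() must not fault. A sketch of the resulting window (hypothetical helper, arguments assumed validated by the caller):

    static unsigned long write_user_to_page(struct page *page, unsigned int pg,
                                            const void __user *user_data,
                                            unsigned long len)
    {
            void *vaddr = kmap_local_page(page);
            unsigned long unwritten;

            pagefault_disable();    /* required by the _inatomic copy */
            unwritten = __copy_from_user_inatomic(vaddr + pg, user_data, len);
            pagefault_enable();
            kunmap_local(vaddr);

            return unwritten;       /* bytes NOT copied */
    }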
index 6b9f6cf50bf6b24b9bb5804b123db70adeb206d8..c9e6d77abab075e490ee434e0a50c3e9235b1400 100644 (file)
@@ -1082,7 +1082,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
                goto err_unlock;
 
        for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
-               u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+               u32 *ptr = kmap_local_page(i915_gem_object_get_page(obj, n));
 
                if (needs_flush & CLFLUSH_BEFORE)
                        drm_clflush_virt_range(ptr, PAGE_SIZE);
@@ -1090,12 +1090,12 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
                if (ptr[dword] != val) {
                        pr_err("n=%lu ptr[%u]=%u, val=%u\n",
                               n, dword, ptr[dword], val);
-                       kunmap_atomic(ptr);
+                       kunmap_local(ptr);
                        err = -EINVAL;
                        break;
                }
 
-               kunmap_atomic(ptr);
+               kunmap_local(ptr);
        }
 
        i915_gem_object_finish_access(obj);
index 3fd68a099a85efc37d4842abe344c5d0e3893f11..2a0c0634d446ed0b59871374d25104c06214dc87 100644 (file)
@@ -24,7 +24,6 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 {
        unsigned int needs_clflush;
        struct page *page;
-       void *map;
        u32 *cpu;
        int err;
 
@@ -34,8 +33,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
                goto out;
 
        page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
-       map = kmap_atomic(page);
-       cpu = map + offset_in_page(offset);
+       cpu = kmap_local_page(page) + offset_in_page(offset);
 
        if (needs_clflush & CLFLUSH_BEFORE)
                drm_clflush_virt_range(cpu, sizeof(*cpu));
@@ -45,7 +43,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
        if (needs_clflush & CLFLUSH_AFTER)
                drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-       kunmap_atomic(map);
+       kunmap_local(cpu);
        i915_gem_object_finish_access(ctx->obj);
 
 out:
@@ -57,7 +55,6 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 {
        unsigned int needs_clflush;
        struct page *page;
-       void *map;
        u32 *cpu;
        int err;
 
@@ -67,15 +64,14 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
                goto out;
 
        page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
-       map = kmap_atomic(page);
-       cpu = map + offset_in_page(offset);
+       cpu = kmap_local_page(page) + offset_in_page(offset);
 
        if (needs_clflush & CLFLUSH_BEFORE)
                drm_clflush_virt_range(cpu, sizeof(*cpu));
 
        *v = *cpu;
 
-       kunmap_atomic(map);
+       kunmap_local(cpu);
        i915_gem_object_finish_access(ctx->obj);
 
 out:
index 7021b6e9b219ef3502af7beaa3770a5a9a251409..89d4dc8b60c6a28fc82c8ce0e2554c0a0e537e81 100644 (file)
@@ -489,12 +489,12 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
        for (n = 0; n < real_page_count(obj); n++) {
                u32 *map;
 
-               map = kmap_atomic(i915_gem_object_get_page(obj, n));
+               map = kmap_local_page(i915_gem_object_get_page(obj, n));
                for (m = 0; m < DW_PER_PAGE; m++)
                        map[m] = value;
                if (!has_llc)
                        drm_clflush_virt_range(map, PAGE_SIZE);
-               kunmap_atomic(map);
+               kunmap_local(map);
        }
 
        i915_gem_object_finish_access(obj);
@@ -520,7 +520,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
        for (n = 0; n < real_page_count(obj); n++) {
                u32 *map, m;
 
-               map = kmap_atomic(i915_gem_object_get_page(obj, n));
+               map = kmap_local_page(i915_gem_object_get_page(obj, n));
                if (needs_flush & CLFLUSH_BEFORE)
                        drm_clflush_virt_range(map, PAGE_SIZE);
 
@@ -546,7 +546,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
                }
 
 out_unmap:
-               kunmap_atomic(map);
+               kunmap_local(map);
                if (err)
                        break;
        }
index e57f9390076c5567abcafa9810e15626d6f8a033..d684a70f2c0422554353eb5ca88a01fc1104b29c 100644 (file)
@@ -504,7 +504,7 @@ static int igt_dmabuf_export_vmap(void *arg)
        }
 
        if (memchr_inv(ptr, 0, dmabuf->size)) {
-               pr_err("Exported object not initialiased to zero!\n");
+               pr_err("Exported object not initialised to zero!\n");
                err = -EINVAL;
                goto out;
        }
index 7ab3ca0f9f268dc16a9728b585124ad0d4dd5a7b..013c642514486a45c5a49afbb924c9a6a7ca90a8 100644 (file)
@@ -21,8 +21,11 @@ struct mei_aux_device;
 /**
  * struct intel_gsc - graphics security controller
  *
- * @gem_obj: scratch memory GSC operations
- * @intf : gsc interface
+ * @intf: gsc interface
+ * @intf.adev: MEI aux. device for this @intf
+ * @intf.gem_obj: scratch memory for GSC operations
+ * @intf.irq: IRQ for this device (%-1 for no IRQ)
+ * @intf.id: this interface's id number/index
  */
 struct intel_gsc {
        struct intel_gsc_intf {
index 9de41703fae58c3df763347f514c89fc62972c70..50962cfd1353ae4673b27a9bb2437d47633b5651 100644 (file)
 #define XEHP_PSS_MODE2                         MCR_REG(0x703c)
 #define   SCOREBOARD_STALL_FLUSH_CONTROL       REG_BIT(5)
 
+#define XEHP_PSS_CHICKEN                       MCR_REG(0x7044)
+#define   FD_END_COLLECT                       REG_BIT(5)
+
 #define GEN7_SC_INSTDONE                       _MMIO(0x7100)
 #define GEN12_SC_INSTDONE_EXTRA                        _MMIO(0x7104)
 #define GEN12_SC_INSTDONE_EXTRA2               _MMIO(0x7108)
index 4cbf9e51264599433b971b09c46aee43ba9e67fb..3eacbc50caf8d98fd92f45640cf64a5d106c9052 100644 (file)
@@ -777,6 +777,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
 
        /* Wa_18019271663:dg2 */
        wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+
+       /* Wa_14019877138:dg2 */
+       wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
 }
 
 static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
index e22c12ce245adf2e79e7623a9222797c13e62c97..813cc888e6fae3f602661840f726ccf8f8908ec2 100644 (file)
@@ -105,61 +105,67 @@ struct intel_guc {
         */
        struct {
                /**
-                * @lock: protects everything in submission_state,
-                * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
-                * out of zero
+                * @submission_state.lock: protects everything in
+                * submission_state, ce->guc_id.id, and ce->guc_id.ref
+                * when transitioning in and out of zero
                 */
                spinlock_t lock;
                /**
-                * @guc_ids: used to allocate new guc_ids, single-lrc
+                * @submission_state.guc_ids: used to allocate new
+                * guc_ids, single-lrc
                 */
                struct ida guc_ids;
                /**
-                * @num_guc_ids: Number of guc_ids, selftest feature to be able
-                * to reduce this number while testing.
+                * @submission_state.num_guc_ids: Number of guc_ids; a selftest
+                * feature allows reducing this number while testing.
                 */
                int num_guc_ids;
                /**
-                * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+                * @submission_state.guc_ids_bitmap: used to allocate
+                * new guc_ids, multi-lrc
                 */
                unsigned long *guc_ids_bitmap;
                /**
-                * @guc_id_list: list of intel_context with valid guc_ids but no
-                * refs
+                * @submission_state.guc_id_list: list of intel_context
+                * with valid guc_ids but no refs
                 */
                struct list_head guc_id_list;
                /**
-                * @guc_ids_in_use: Number single-lrc guc_ids in use
+                * @submission_state.guc_ids_in_use: Number single-lrc
+                * guc_ids in use
                 */
                unsigned int guc_ids_in_use;
                /**
-                * @destroyed_contexts: list of contexts waiting to be destroyed
-                * (deregistered with the GuC)
+                * @submission_state.destroyed_contexts: list of contexts
+                * waiting to be destroyed (deregistered with the GuC)
                 */
                struct list_head destroyed_contexts;
                /**
-                * @destroyed_worker: worker to deregister contexts, need as we
-                * need to take a GT PM reference and can't from destroy
-                * function as it might be in an atomic context (no sleeping)
+                * @submission_state.destroyed_worker: worker to deregister
+                * contexts; needed because we must take a GT PM reference and
+                * can't do that from the destroy function, as it might run in
+                * an atomic context (no sleeping)
                 */
                struct work_struct destroyed_worker;
                /**
-                * @reset_fail_worker: worker to trigger a GT reset after an
-                * engine reset fails
+                * @submission_state.reset_fail_worker: worker to trigger
+                * a GT reset after an engine reset fails
                 */
                struct work_struct reset_fail_worker;
                /**
-                * @reset_fail_mask: mask of engines that failed to reset
+                * @submission_state.reset_fail_mask: mask of engines that
+                * failed to reset
                 */
                intel_engine_mask_t reset_fail_mask;
                /**
-                * @sched_disable_delay_ms: schedule disable delay, in ms, for
-                * contexts
+                * @submission_state.sched_disable_delay_ms: schedule
+                * disable delay, in ms, for contexts
                 */
                unsigned int sched_disable_delay_ms;
                /**
-                * @sched_disable_gucid_threshold: threshold of min remaining available
-                * guc_ids before we start bypassing the schedule disable delay
+                * @submission_state.sched_disable_gucid_threshold:
+                * threshold of min remaining available guc_ids before
+                * we start bypassing the schedule disable delay
                 */
                unsigned int sched_disable_gucid_threshold;
        } submission_state;
@@ -243,37 +249,40 @@ struct intel_guc {
         */
        struct {
                /**
-                * @lock: Lock protecting the below fields and the engine stats.
+                * @timestamp.lock: Lock protecting the below fields and
+                * the engine stats.
                 */
                spinlock_t lock;
 
                /**
-                * @gt_stamp: 64 bit extended value of the GT timestamp.
+                * @timestamp.gt_stamp: 64-bit extended value of the GT
+                * timestamp.
                 */
                u64 gt_stamp;
 
                /**
-                * @ping_delay: Period for polling the GT timestamp for
-                * overflow.
+                * @timestamp.ping_delay: Period for polling the GT
+                * timestamp for overflow.
                 */
                unsigned long ping_delay;
 
                /**
-                * @work: Periodic work to adjust GT timestamp, engine and
-                * context usage for overflows.
+                * @timestamp.work: Periodic work to adjust GT timestamp,
+                * engine and context usage for overflows.
                 */
                struct delayed_work work;
 
                /**
-                * @shift: Right shift value for the gpm timestamp
+                * @timestamp.shift: Right shift value for the gpm timestamp
                 */
                u32 shift;
 
                /**
-                * @last_stat_jiffies: jiffies at last actual stats collection time
-                * We use this timestamp to ensure we don't oversample the
-                * stats because runtime power management events can trigger
-                * stats collection at much higher rates than required.
+                * @timestamp.last_stat_jiffies: jiffies at last actual
+                * stats collection time. We use this timestamp to ensure
+                * we don't oversample the stats because runtime power
+                * management events can trigger stats collection at much
+                * higher rates than required.
                 */
                unsigned long last_stat_jiffies;
        } timestamp;
index 362639162ed60340383188e42031d22acb8ef28d..756093eaf2ad186e1962271cc176e58cf4e54d69 100644 (file)
@@ -1343,16 +1343,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 
                for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
                        u32 len = min_t(u32, size, PAGE_SIZE - offset);
-                       void *vaddr;
 
                        if (idx > 0) {
                                idx--;
                                continue;
                        }
 
-                       vaddr = kmap_atomic(page);
-                       memcpy(dst, vaddr + offset, len);
-                       kunmap_atomic(vaddr);
+                       memcpy_from_page(dst, page, offset, len);
 
                        offset = 0;
                        dst += len;
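
memcpy_from_page() here (and memcpy_to_page() in the earlier phys-object hunk) wraps exactly the open-coded sequence being deleted; roughly, per include/linux/highmem.h:

    static inline void memcpy_from_page(char *to, struct page *page,
                                        size_t offset, size_t len)
    {
            char *from = kmap_local_page(page);

            memcpy(to, from + offset, len);
            kunmap_local(from);
    }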
index ddf49c2dbb917c5dc650dfd88f409727760e4ba0..2905df83e180ed91eddbde3c30dafba4e0fb2991 100644 (file)
@@ -1211,11 +1211,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                for (n = offset >> PAGE_SHIFT; remain; n++) {
                        int len = min(remain, PAGE_SIZE - x);
 
-                       src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+                       src = kmap_local_page(i915_gem_object_get_page(src_obj, n));
                        if (src_needs_clflush)
                                drm_clflush_virt_range(src + x, len);
                        memcpy(ptr, src + x, len);
-                       kunmap_atomic(src);
+                       kunmap_local(src);
 
                        ptr += len;
                        remain -= len;
index 975da8e7f2a9f8138c80b03c731ce03856ef7a89..8c3f443c8347e06f5f09700108f4a11c1e903f3b 100644 (file)
@@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
         *     tau4 = (4 | x) << y
         * but add 2 when doing the final right shift to account for units
         */
-       tau4 = ((1 << x_w) | x) << y;
+       tau4 = (u64)((1 << x_w) | x) << y;
        /* val in hwmon interface units (millisec) */
        out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
 
@@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
        r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
        x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
        y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
-       tau4 = ((1 << x_w) | x) << y;
+       tau4 = (u64)((1 << x_w) | x) << y;
        max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
 
        if (val > max_win)
index 13b1ae9b96c7fddbe1622dbda609d10675021503..46445248d193e6bf0316e4347a993bfc5c01ae1d 100644 (file)
@@ -291,7 +291,8 @@ struct i915_perf_stream {
                int size_exponent;
 
                /**
-                * @ptr_lock: Locks reads and writes to all head/tail state
+                * @oa_buffer.ptr_lock: Locks reads and writes to all
+                * head/tail state
                 *
                 * Consider: the head and tail pointer state needs to be read
                 * consistently from a hrtimer callback (atomic context) and
@@ -313,7 +314,8 @@ struct i915_perf_stream {
                spinlock_t ptr_lock;
 
                /**
-                * @head: Although we can always read back the head pointer register,
+                * @oa_buffer.head: Although we can always read back
+                * the head pointer register,
                 * we prefer to avoid trusting the HW state, just to avoid any
                 * risk that some hardware condition could somehow bump the
                 * head pointer unpredictably and cause us to forward the wrong
@@ -322,7 +324,8 @@ struct i915_perf_stream {
                u32 head;
 
                /**
-                * @tail: The last verified tail that can be read by userspace.
+                * @oa_buffer.tail: The last verified tail that can be
+                * read by userspace.
                 */
                u32 tail;
        } oa_buffer;
index 2ca7e535799fefcd29261f6f20e4aff8a86fc10f..ecdd5767d8ef535dd8ced7ab2f7d3d3539aff263 100644 (file)
@@ -193,13 +193,14 @@ struct pvr_device {
         * @queues: Queue-related fields.
         */
        struct {
-               /** @active: Active queue list. */
+               /** @queues.active: Active queue list. */
                struct list_head active;
 
-               /** @idle: Idle queue list. */
+               /** @queues.idle: Idle queue list. */
                struct list_head idle;
 
-               /** @lock: Lock protecting access to the active/idle lists. */
+               /** @queues.lock: Lock protecting access to the active/idle
+                *  lists. */
                struct mutex lock;
        } queues;
 
@@ -207,18 +208,18 @@ struct pvr_device {
         * @watchdog: Watchdog for communications with firmware.
         */
        struct {
-               /** @work: Work item for watchdog callback. */
+               /** @watchdog.work: Work item for watchdog callback. */
                struct delayed_work work;
 
                /**
-                * @old_kccb_cmds_executed: KCCB command execution count at last
-                * watchdog poll.
+                * @watchdog.old_kccb_cmds_executed: KCCB command execution
+                * count at last watchdog poll.
                 */
                u32 old_kccb_cmds_executed;
 
                /**
-                * @kccb_stall_count: Number of watchdog polls KCCB has been
-                * stalled for.
+                * @watchdog.kccb_stall_count: Number of watchdog polls
+                * KCCB has been stalled for.
                 */
                u32 kccb_stall_count;
        } watchdog;
@@ -227,43 +228,46 @@ struct pvr_device {
         * @kccb: Circular buffer for communications with firmware.
         */
        struct {
-               /** @ccb: Kernel CCB. */
+               /** @kccb.ccb: Kernel CCB. */
                struct pvr_ccb ccb;
 
-               /** @rtn_q: Waitqueue for KCCB command return waiters. */
+               /** @kccb.rtn_q: Waitqueue for KCCB command return waiters. */
                wait_queue_head_t rtn_q;
 
-               /** @rtn_obj: Object representing KCCB return slots. */
+               /** @kccb.rtn_obj: Object representing KCCB return slots. */
                struct pvr_fw_object *rtn_obj;
 
                /**
-                * @rtn: Pointer to CPU mapping of KCCB return slots. Must be
-                * accessed by READ_ONCE()/WRITE_ONCE().
+                * @kccb.rtn: Pointer to CPU mapping of KCCB return slots.
+                * Must be accessed by READ_ONCE()/WRITE_ONCE().
                 */
                u32 *rtn;
 
-               /** @slot_count: Total number of KCCB slots available. */
+               /** @kccb.slot_count: Total number of KCCB slots available. */
                u32 slot_count;
 
-               /** @reserved_count: Number of KCCB slots reserved for future use. */
+               /** @kccb.reserved_count: Number of KCCB slots reserved for
+                *  future use. */
                u32 reserved_count;
 
                /**
-                * @waiters: List of KCCB slot waiters.
+                * @kccb.waiters: List of KCCB slot waiters.
                 */
                struct list_head waiters;
 
-               /** @fence_ctx: KCCB fence context. */
+               /** @kccb.fence_ctx: KCCB fence context. */
                struct {
-                       /** @id: KCCB fence context ID allocated with dma_fence_context_alloc(). */
+                       /** @kccb.fence_ctx.id: KCCB fence context ID
+                        *  allocated with dma_fence_context_alloc(). */
                        u64 id;
 
-                       /** @seqno: Sequence number incremented each time a fence is created. */
+                       /** @kccb.fence_ctx.seqno: Sequence number incremented
+                        *  each time a fence is created. */
                        atomic_t seqno;
 
                        /**
-                        * @lock: Lock used to synchronize access to fences allocated by this
-                        * context.
+                        * @kccb.fence_ctx.lock: Lock used to synchronize
+                        * access to fences allocated by this context.
                         */
                        spinlock_t lock;
                } fence_ctx;
index a6602c0126715635d6328c2fb295d4195b7dd873..3dda885df5b223dc2b637592e50cc7e958b5cbb7 100644 (file)
@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
        } else {
                ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
                                   mem->mem.size, &tmp);
+               if (ret)
+                       goto done;
+
                vma->addr = tmp.addr;
        }
 
index c8ce7ff187135b0992b52a3c62d8a48593b2e625..e74493a4569edb933e7d55c817709cb93e701a56 100644 (file)
@@ -550,6 +550,10 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
                struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head);
 
                runl->nonstall.vector = engn->func->nonstall(engn);
+
+               /* if no nonstall vector just keep going */
+               if (runl->nonstall.vector == -1)
+                       continue;
                if (runl->nonstall.vector < 0) {
                        RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector);
                        return runl->nonstall.vector;
index d088e636edc31c407582bdbe8ab0ee28653956bf..de2ebe8f21348c06d7585e1a7b02ae28d8fa29c1 100644 (file)
@@ -350,7 +350,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
        int ret;
 
        ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
-       WARN_ON(ret < 0);
+       WARN_ON(ret == -ENOENT);
        return ret;
 }
 
index 04bceaa28a197d93d85db77098e9f8330c63cff0..da1bebb896f7fb1ec5bba033600f16d23f29f96c 100644 (file)
@@ -25,12 +25,8 @@ int
 nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
 {
        for (int i = 0; i < gsp->intr_nr; i++) {
-               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
-                       if (gsp->intr[i].nonstall != ~0)
-                               return gsp->intr[i].nonstall;
-
-                       return -EINVAL;
-               }
+               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst)
+                       return gsp->intr[i].nonstall;
        }
 
        return -ENOENT;
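
With the -EINVAL branch gone, the helper's contract (as the callers above now assume) is: a non-negative value is a valid nonstall vector; ~0 stored in the table reads back as -1, meaning the engine simply has no nonstall vector (skip, don't fail); and -ENOENT means no matching subdev was registered, the only case r535_engn_nonstall() still warns about. A one-line sketch of the caller-side test:

    /* Assumed contract: vector >= 0 usable, -1 absent-but-OK, -ENOENT error. */
    static bool nonstall_vector_valid(int vector)
    {
            return vector >= 0;
    }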
index 44508c2dd61455943613bc002e52e3c2cdf25ae8..85b3b4871a1d63bf5a8cb2315a25dfd5ef2b8b70 100644 (file)
@@ -35,7 +35,6 @@
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
-#include "rockchip_drm_fb.h"
 #include "rockchip_drm_vop2.h"
 #include "rockchip_rgb.h"
 
@@ -959,12 +958,6 @@ static void vop2_enable(struct vop2 *vop2)
                return;
        }
 
-       ret = regmap_reinit_cache(vop2->map, &vop2_regmap_config);
-       if (ret) {
-               drm_err(vop2->drm, "failed to reinit cache: %d\n", ret);
-               return;
-       }
-
        if (vop2->data->soc_id == 3566)
                vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
 
@@ -996,6 +989,8 @@ static void vop2_disable(struct vop2 *vop2)
 
        pm_runtime_put_sync(vop2->dev);
 
+       regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
+
        clk_disable_unprepare(vop2->pclk);
        clk_disable_unprepare(vop2->aclk);
        clk_disable_unprepare(vop2->hclk);
@@ -1685,7 +1680,6 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
        unsigned long dclk_core_rate = v_pixclk >> 2;
        unsigned long dclk_rate = v_pixclk;
        unsigned long dclk_out_rate;
-       unsigned long if_dclk_rate;
        unsigned long if_pixclk_rate;
        int K = 1;
 
@@ -1700,13 +1694,13 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
                }
 
                if_pixclk_rate = (dclk_core_rate << 1) / K;
-               if_dclk_rate = dclk_core_rate / K;
                /*
+                * if_dclk_rate = dclk_core_rate / K;
                 * *if_pixclk_div = dclk_rate / if_pixclk_rate;
                 * *if_dclk_div = dclk_rate / if_dclk_rate;
                 */
-                *if_pixclk_div = 2;
-                *if_dclk_div = 4;
+               *if_pixclk_div = 2;
+               *if_dclk_div = 4;
        } else if (vop2_output_if_is_edp(id)) {
                /*
                 * edp_pixclk = edp_dclk > dclk_core
index f843a50d5dce6df10b405ae26853a5d01dbd7c0c..94eafcecc65b0c878e20e7a3f4a528f8a56e60c1 100644 (file)
@@ -62,9 +62,9 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = {
        REGDEF(33, 71, V3D_PTB_BPCA),
        REGDEF(33, 71, V3D_PTB_BPCS),
 
-       REGDEF(33, 41, V3D_GMP_STATUS(33)),
-       REGDEF(33, 41, V3D_GMP_CFG(33)),
-       REGDEF(33, 41, V3D_GMP_VIO_ADDR(33)),
+       REGDEF(33, 42, V3D_GMP_STATUS(33)),
+       REGDEF(33, 42, V3D_GMP_CFG(33)),
+       REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
 
        REGDEF(33, 71, V3D_ERR_FDBGO),
        REGDEF(33, 71, V3D_ERR_FDBGB),
@@ -74,13 +74,13 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = {
 
 static const struct v3d_reg_def v3d_csd_reg_defs[] = {
        REGDEF(41, 71, V3D_CSD_STATUS),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG0(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG1(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG2(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG3(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG4(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG5(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG6(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
        REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
        REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
        REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
index 0f9c73d2e70ef29824b24e6dffdba1fbe0446529..9060f9fae6f12c5c67dd96ecdb977196bf084884 100644 (file)
@@ -92,7 +92,7 @@ struct drm_gpuva {
         */
        struct {
                /**
-                * @addr: the start address
+                * @va.addr: the start address
                 */
                u64 addr;
 
@@ -107,17 +107,17 @@ struct drm_gpuva {
         */
        struct {
                /**
-                * @offset: the offset within the &drm_gem_object
+                * @gem.offset: the offset within the &drm_gem_object
                 */
                u64 offset;
 
                /**
-                * @obj: the mapped &drm_gem_object
+                * @gem.obj: the mapped &drm_gem_object
                 */
                struct drm_gem_object *obj;
 
                /**
-                * @entry: the &list_head to attach this object to a &drm_gpuvm_bo
+                * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
                 */
                struct list_head entry;
        } gem;
@@ -127,12 +127,12 @@ struct drm_gpuva {
         */
        struct {
                /**
-                * @rb: the rb-tree node
+                * @rb.node: the rb-tree node
                 */
                struct rb_node node;
 
                /**
-                * @entry: The &list_head to additionally connect &drm_gpuvas
+                * @rb.entry: The &list_head to additionally connect &drm_gpuvas
                 * in the same order they appear in the interval tree. This is
                 * useful to keep iterating &drm_gpuvas from a start node found
                 * through the rb-tree while doing modifications on the rb-tree
@@ -141,7 +141,7 @@ struct drm_gpuva {
                struct list_head entry;
 
                /**
-                * @__subtree_last: needed by the interval tree, holding last-in-subtree
+                * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
                 */
                u64 __subtree_last;
        } rb;
@@ -187,6 +187,8 @@ static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
  * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
  * is invalidated
  * @va: the &drm_gpuva to check
+ *
+ * Returns: %true if the GPU VA is invalidated, %false otherwise
  */
 static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
 {
@@ -252,12 +254,12 @@ struct drm_gpuvm {
         */
        struct {
                /**
-                * @tree: the rb-tree to track GPU VA mappings
+                * @rb.tree: the rb-tree to track GPU VA mappings
                 */
                struct rb_root_cached tree;
 
                /**
-                * @list: the &list_head to track GPU VA mappings
+                * @rb.list: the &list_head to track GPU VA mappings
                 */
                struct list_head list;
        } rb;
@@ -290,19 +292,19 @@ struct drm_gpuvm {
         */
        struct {
                /**
-                * @list: &list_head storing &drm_gpuvm_bos serving as
+                * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
                 * external object
                 */
                struct list_head list;
 
                /**
-                * @local_list: pointer to the local list temporarily storing
-                * entries from the external object list
+                * @extobj.local_list: pointer to the local list temporarily
+                * storing entries from the external object list
                 */
                struct list_head *local_list;
 
                /**
-                * @lock: spinlock to protect the extobj list
+                * @extobj.lock: spinlock to protect the extobj list
                 */
                spinlock_t lock;
        } extobj;
@@ -312,19 +314,19 @@ struct drm_gpuvm {
         */
        struct {
                /**
-                * @list: &list_head storing &drm_gpuvm_bos currently being
-                * evicted
+                * @evict.list: &list_head storing &drm_gpuvm_bos currently
+                * being evicted
                 */
                struct list_head list;
 
                /**
-                * @local_list: pointer to the local list temporarily storing
-                * entries from the evicted object list
+                * @evict.local_list: pointer to the local list temporarily
+                * storing entries from the evicted object list
                 */
                struct list_head *local_list;
 
                /**
-                * @lock: spinlock to protect the evict list
+                * @evict.lock: spinlock to protect the evict list
                 */
                spinlock_t lock;
        } evict;
@@ -344,6 +346,8 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
  *
  * This function acquires an additional reference to @gpuvm. It is illegal to
  * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm pointer
  */
 static inline struct drm_gpuvm *
 drm_gpuvm_get(struct drm_gpuvm *gpuvm)
@@ -533,12 +537,13 @@ struct drm_gpuvm_exec {
         */
        struct {
                /**
-                * @fn: The driver callback to lock additional &drm_gem_objects.
+                * @extra.fn: The driver callback to lock additional
+                * &drm_gem_objects.
                 */
                int (*fn)(struct drm_gpuvm_exec *vm_exec);
 
                /**
-                * @priv: driver private data for the @fn callback
+                * @extra.priv: driver private data for the @fn callback
                 */
                void *priv;
        } extra;
@@ -589,7 +594,7 @@ void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
                              enum dma_resv_usage extobj_usage);
 
 /**
- * drm_gpuvm_exec_resv_add_fence()
+ * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
  * @vm_exec: the &drm_gpuvm_exec wrapper
  * @fence: fence to add
  * @private_usage: private dma-resv usage
@@ -608,10 +613,12 @@ drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
 }
 
 /**
- * drm_gpuvm_exec_validate()
+ * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
  * @vm_exec: the &drm_gpuvm_exec wrapper
  *
  * See drm_gpuvm_validate().
+ *
+ * Returns: 0 on success, negative error code on failure.
  */
 static inline int
 drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
@@ -664,7 +671,7 @@ struct drm_gpuvm_bo {
         */
        struct {
                /**
-                * @gpuva: The list of linked &drm_gpuvas.
+                * @list.gpuva: The list of linked &drm_gpuvas.
                 *
                 * It is safe to access entries from this list as long as the
                 * GEM's gpuva lock is held. See also struct drm_gem_object.
@@ -672,25 +679,25 @@ struct drm_gpuvm_bo {
                struct list_head gpuva;
 
                /**
-                * @entry: Structure containing all &list_heads serving as
+                * @list.entry: Structure containing all &list_heads serving as
                 * entry.
                 */
                struct {
                        /**
-                        * @gem: List entry to attach to the &drm_gem_objects
-                        * gpuva list.
+                        * @list.entry.gem: List entry to attach to the
+                        * &drm_gem_objects gpuva list.
                         */
                        struct list_head gem;
 
                        /**
-                        * @evict: List entry to attach to the &drm_gpuvms
-                        * extobj list.
+                        * @list.entry.extobj: List entry to attach to the
+                        * &drm_gpuvms extobj list.
                         */
                        struct list_head extobj;
 
                        /**
-                        * @evict: List entry to attach to the &drm_gpuvms evict
-                        * list.
+                        * @list.entry.evict: List entry to attach to the
+                        * &drm_gpuvms evict list.
                         */
                        struct list_head evict;
                } entry;
@@ -713,6 +720,8 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
  *
  * This function acquires an additional reference to @vm_bo. It is illegal to
  * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct vm_bo pointer
  */
 static inline struct drm_gpuvm_bo *
 drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
@@ -730,7 +739,8 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
 void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
 
 /**
- * drm_gpuvm_bo_gem_evict()
+ * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
+ * to/from the &drm_gpuvms evicted list
  * @obj: the &drm_gem_object
  * @evict: indicates whether @obj is evicted
  *
@@ -817,12 +827,12 @@ struct drm_gpuva_op_map {
         */
        struct {
                /**
-                * @addr: the base address of the new mapping
+                * @va.addr: the base address of the new mapping
                 */
                u64 addr;
 
                /**
-                * @range: the range of the new mapping
+                * @va.range: the range of the new mapping
                 */
                u64 range;
        } va;
@@ -832,12 +842,12 @@ struct drm_gpuva_op_map {
         */
        struct {
                /**
-                * @offset: the offset within the &drm_gem_object
+                * @gem.offset: the offset within the &drm_gem_object
                 */
                u64 offset;
 
                /**
-                * @obj: the &drm_gem_object to map
+                * @gem.obj: the &drm_gem_object to map
                 */
                struct drm_gem_object *obj;
        } gem;
index 218edb0a96f8c043df13a5bf25f85ec754ee449a..fd4f9574d177a269b2cdbe5a36b3b30f2addbc94 100644 (file)
@@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_FENCE       44
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
  * EXEC_OBJECT_CAPTURE.
  */
 #define I915_PARAM_HAS_EXEC_CAPTURE     45
@@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy {
         * is accurate.
         *
         * The returned dword is split into two fields to indicate both
-        * the engine classess on which the object is being read, and the
+        * the engine classes on which the object is being read, and the
         * engine class on which it is currently being written (if any).
         *
         * The low word (bits 0:15) indicate if the object is being written
@@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise {
        __u32 handle;
 
        /* Advice: either the buffer will be needed again in the near future,
-        *         or wont be and could be discarded under memory pressure.
+        *         or won't be and could be discarded under memory pressure.
         */
        __u32 madv;
 
@@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info {
  *     // enough to hold our array of engines. The kernel will fill out the
  *     // item.length for us, which is the number of bytes we need.
  *     //
- *     // Alternatively a large buffer can be allocated straight away enabling
+ *     // Alternatively a large buffer can be allocated straightaway enabling
  *     // querying in one pass, in which case item.length should contain the
  *     // length of the provided buffer.
  *     err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info {
  *     // Now that we allocated the required number of bytes, we call the ioctl
  *     // again, this time with the data_ptr pointing to our newly allocated
  *     // blob, which the kernel can then populate with info on all engines.
- *     item.data_ptr = (uintptr_t)&info,
+ *     item.data_ptr = (uintptr_t)&info;
  *
  *     err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
  *     if (err) ...
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info {
 /**
  * struct drm_i915_engine_info
  *
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
  */
 struct drm_i915_engine_info {
        /** @engine: Engine class and instance. */