Merge tag 'drm-next-2024-03-22' of https://gitlab.freedesktop.org/drm/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 22 Mar 2024 02:04:31 +0000 (19:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 22 Mar 2024 02:04:31 +0000 (19:04 -0700)
Pull drm fixes from Dave Airlie:
 "Fixes from the last week (or 3 weeks in amdgpu case), after amdgpu,
  it's xe and nouveau then a few scattered core fixes.

  core:
   - fix rounding in drm_fixp2int_round() (sketched below)

  bridge:
   - fix documentation for DRM_BRIDGE_OP_EDID

  sun4i:
   - fix 64-bit division on 32-bit architectures (sketched below)

  tests:
   - fix dependency on DRM_KMS_HELPER

  probe-helper:
   - never return negative values from .get_modes(), plus driver fixes
     (sketched below)

  xe:
   - invalidate userptr vma on page pin fault
   - fail early on sysfs file creation error
   - skip VMA pinning on xe_exec if no batches

  nouveau:
   - clear bo resource bus after eviction
   - documentation fixes
   - don't check devinit disable on GSP

  amdgpu:
   - Freesync fixes
   - UAF IOCTL fixes
   - Fix mmhub client ID mapping
   - IH 7.0 fix
   - DML2 fixes
   - VCN 4.0.6 fix
   - GART bind fix
   - GPU reset fix
   - SR-IOV fix
   - OD table handling fixes
   - Fix TA handling on boards without display hardware
   - DML1 fix
   - ABM fix
   - eDP panel fix
   - DPPCLK fix
   - HDCP fix
   - Revert incorrect error case handling in ioremap
   - VPE fix
   - HDMI fixes
   - SDMA 4.4.2 fix
   - Other misc fixes

  amdkfd:
   - Fix duplicate BO handling in process restore"
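
The drm_fixp2int_round() fix above restores round-to-nearest conversion for
the s64 32.32 fixed-point type. A minimal sketch of the idiom with generic
names, not the verbatim drm_fixed.h code:

    #include <linux/types.h>

    #define FIXP_SHIFT  32
    #define FIXP_ONE    (1LL << FIXP_SHIFT)

    /* Truncating conversion: the arithmetic shift drops the fraction. */
    static inline int fixp2int(s64 a)
    {
            return a >> FIXP_SHIFT;
    }

    /* Round to nearest: bias by one half before truncating,
     * so e.g. 2.5 converts to 3 instead of 2. */
    static inline int fixp2int_round(s64 a)
    {
            return fixp2int(a + (FIXP_ONE >> 1));
    }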
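
The sun4i item is the classic 32-bit build break: a plain 64-bit division
makes the compiler emit a libgcc call (__udivdi3 / __aeabi_uldivmod) that the
kernel does not provide, so the link fails on 32-bit architectures. The usual
cure is the div64 helpers; a hedged sketch with a hypothetical function, not
the sun4i change itself:

    #include <linux/math64.h>

    static u64 scale_rate(u64 parent_rate, u32 div)
    {
            /* "parent_rate / div" would fail to link on 32-bit. */
            return div_u64(parent_rate, div);
    }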
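
The probe-helper series pins down the .get_modes() contract: the return value
is a mode count consumed by drm_helper_probe_single_connector_modes(), so a
failing driver must return 0, never a negative errno. A conforming sketch;
the foo_* name is hypothetical:

    #include <linux/i2c.h>
    #include <linux/slab.h>
    #include <drm/drm_connector.h>
    #include <drm/drm_edid.h>

    /* Assumes the connector was registered with drm_connector_init_with_ddc()
     * so connector->ddc points at the panel's DDC bus. */
    static int foo_connector_get_modes(struct drm_connector *connector)
    {
            struct edid *edid;
            int count;

            edid = drm_get_edid(connector, connector->ddc);
            drm_connector_update_edid_property(connector, edid); /* NULL clears it */
            if (!edid)
                    return 0; /* "no modes", never a negative errno */

            count = drm_add_edid_modes(connector, edid);
            kfree(edid);
            return count;
    }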

* tag 'drm-next-2024-03-22' of https://gitlab.freedesktop.org/drm/kernel: (50 commits)
  drm/amdgpu/pm: Don't use OD table on Arcturus
  drm/amdgpu: drop setting buffer funcs in sdma442
  drm/amd/display: Fix noise issue on HDMI AV mute
  drm/amd/display: Revert Remove pixle rate limit for subvp
  Revert "drm/amdgpu/vpe: don't emit cond exec command under collaborate mode"
  Revert "drm/amd/amdgpu: Fix potential ioremap() memory leaks in amdgpu_device_init()"
  drm/amd/display: Add a dc_state NULL check in dc_state_release
  drm/amd/display: Return the correct HDCP error code
  drm/amd/display: Implement wait_for_odm_update_pending_complete
  drm/amd/display: Lock all enabled otg pipes even with no planes
  drm/amd/display: Amend coasting vtotal for replay low hz
  drm/amd/display: Fix idle check for shared firmware state
  drm/amd/display: Update odm when ODM combine is changed on an otg master pipe with no plane
  drm/amd/display: Init DPPCLK from SMU on dcn32
  drm/amd/display: Add monitor patch for specific eDP
  drm/amd/display: Allow dirty rects to be sent to dmub when abm is active
  drm/amd/display: Override min required DCFCLK in dml1_validate
  drm/amdgpu: Bypass display ta if display hw is not available
  drm/amdgpu: correct the KGQ fallback message
  drm/amdgpu/pm: Check the validity of overdiver power limit
  ...

86 files changed:
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_state.c
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/link.h
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/bridge/lontium-lt8912b.c
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/imx/ipuv3/parallel-display.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_gt_pagefault.c
drivers/gpu/drm/xe/xe_trace.h
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h
drivers/gpu/drm/xe/xe_vram_freq.c
include/drm/drm_bridge.h
include/drm/drm_fixed.h
include/drm/drm_modeset_helper_vtables.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 182ed8f67850033a41ae1bad8c1b5ea0620ebb9d..5a0c476361c300273d0c6cf27306b14c59679cbe 100644
@@ -68,6 +68,7 @@ config DRM_USE_DYNAMIC_DEBUG
 config DRM_KUNIT_TEST_HELPERS
        tristate
        depends on DRM && KUNIT
+       select DRM_KMS_HELPER
        help
          KUnit Helpers for KMS drivers.
 
@@ -80,7 +81,6 @@ config DRM_KUNIT_TEST
        select DRM_EXEC
        select DRM_EXPORT_FOR_TESTS if m
        select DRM_GEM_SHMEM_HELPER
-       select DRM_KMS_HELPER
        select DRM_KUNIT_TEST_HELPERS
        select DRM_LIB_RANDOM
        select PRIME_NUMBERS
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f5f2945711be0c215d6a812d217c402616fc1cef..35dd6effa9a34a1be9ce83f83ab7915a38f6b4e8 100644
@@ -146,7 +146,7 @@ int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
 {
        int ret;
 
-       if (!adev->kfd.init_complete)
+       if (!adev->kfd.init_complete || adev->kfd.client.dev)
                return 0;
 
        ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 14dc9d2d8d53ad0e4085fb137b42d9b2967f3b33..df58a6a1a67ec51f1bb81ff1bd8364be8a46cc13 100644
@@ -2869,14 +2869,16 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 
        mutex_lock(&process_info->lock);
 
-       drm_exec_init(&exec, 0, 0);
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
        drm_exec_until_all_locked(&exec) {
                list_for_each_entry(peer_vm, &process_info->vm_list_head,
                                    vm_list_node) {
                        ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
                        drm_exec_retry_on_contention(&exec);
-                       if (unlikely(ret))
+                       if (unlikely(ret)) {
+                               pr_err("Locking VM PD failed, ret: %d\n", ret);
                                goto ttm_reserve_fail;
+                       }
                }
 
                /* Reserve all BOs and page tables/directory. Add all BOs from
@@ -2889,8 +2891,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
                        gobj = &mem->bo->tbo.base;
                        ret = drm_exec_prepare_obj(&exec, gobj, 1);
                        drm_exec_retry_on_contention(&exec);
-                       if (unlikely(ret))
+                       if (unlikely(ret)) {
+                               pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
                                goto ttm_reserve_fail;
+                       }
                }
        }
 
@@ -2950,8 +2954,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
         * validations above would invalidate DMABuf imports again.
         */
        ret = process_validate_vms(process_info, &exec.ticket);
-       if (ret)
+       if (ret) {
+               pr_debug("Validating VMs failed, ret: %d\n", ret);
                goto validate_map_fail;
+       }
 
        /* Update mappings not managed by KFD */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e9454e6e4cb4edaccd731b415a049ea83cff45f..5dc24c971b41f0a93c7463fbbf397d33e08c5563 100644
@@ -4040,10 +4040,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
         * early on during init and before calling to RREG32.
         */
        adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
-       if (!adev->reset_domain) {
-               r = -ENOMEM;
-               goto unmap_memory;
-       }
+       if (!adev->reset_domain)
+               return -ENOMEM;
 
        /* detect hw virtualization here */
        amdgpu_detect_virtualization(adev);
@@ -4053,7 +4051,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_device_get_job_timeout_settings(adev);
        if (r) {
                dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-               goto unmap_memory;
+               return r;
        }
 
        amdgpu_device_set_mcbp(adev);
@@ -4061,12 +4059,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        /* early init functions */
        r = amdgpu_device_ip_early_init(adev);
        if (r)
-               goto unmap_memory;
+               return r;
 
        /* Get rid of things like offb */
        r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
        if (r)
-               goto unmap_memory;
+               return r;
 
        /* Enable TMZ based on IP_VERSION */
        amdgpu_gmc_tmz_set(adev);
@@ -4076,7 +4074,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (adev->gmc.xgmi.supported) {
                r = adev->gfxhub.funcs->get_xgmi_info(adev);
                if (r)
-                       goto unmap_memory;
+                       return r;
        }
 
        /* enable PCIE atomic ops */
@@ -4345,8 +4343,6 @@ release_ras_con:
 failed:
        amdgpu_vf_error_trans_all(adev);
 
-unmap_memory:
-       iounmap(adev->rmmio);
        return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 15b188aaf681805af714f0ee6d8d660e70ed81d3..80b9642f2bc4f25c69e9f30c70138f073e0c6cd2 100644
@@ -2479,8 +2479,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        }
        for (i = 0; i < mgpu_info.num_dgpu; i++) {
                adev = mgpu_info.gpu_ins[i].adev;
-               if (!adev->kfd.init_complete)
+               if (!adev->kfd.init_complete) {
+                       kgd2kfd_init_zone_device(adev);
                        amdgpu_amdkfd_device_init(adev);
+                       amdgpu_amdkfd_drm_client_create(adev);
+               }
                amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f8b48fd93108cecc0ef75a358323c89274df2815..55d5508987ffe57979bbec08203ff85675a1031a 100644
@@ -687,7 +687,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
        r = amdgpu_ring_test_helper(kiq_ring);
        spin_unlock(&kiq->ring_lock);
        if (r)
-               DRM_ERROR("KCQ enable failed\n");
+               DRM_ERROR("KGQ enable failed\n");
 
        return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 55b65fc04b651ee36196b038e7dda5b82e6e2051..431ec72655ec80f42d6f7f01e7c3fe1191b18f26 100644
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
  */
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
 {
+       int r;
+
        if (bo->kfd_bo)
-               return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+               r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
                                                    addr, amdgpu_bo_size(bo),
                                                    &amdgpu_hmm_hsa_ops);
-       return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
-                                           amdgpu_bo_size(bo),
-                                           &amdgpu_hmm_gfx_ops);
+       else
+               r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+                                                       amdgpu_bo_size(bo),
+                                                       &amdgpu_hmm_gfx_ops);
+       if (r)
+               /*
+                * Make sure amdgpu_hmm_unregister() doesn't call
+                * mmu_interval_notifier_remove() when the notifier isn't properly
+                * initialized.
+                */
+               bo->notifier.mm = NULL;
+
+       return r;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3c2b1413058bb790c3aa3da792934200a967ec29..94b310fdb719d4c09717e0829b2c40adeae1b8d4 100644
@@ -1830,6 +1830,10 @@ static int psp_hdcp_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
+       /* bypass hdcp initialization if dmu is harvested */
+       if (!amdgpu_device_has_display_hardware(psp->adev))
+               return 0;
+
        if (!psp->hdcp_context.context.bin_desc.size_bytes ||
            !psp->hdcp_context.context.bin_desc.start_addr) {
                dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
@@ -1862,6 +1866,9 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
+       if (!psp->hdcp_context.context.initialized)
+               return 0;
+
        return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
 }
 
@@ -1897,6 +1904,10 @@ static int psp_dtm_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
+       /* bypass dtm initialization if dmu is harvested */
+       if (!amdgpu_device_has_display_hardware(psp->adev))
+               return 0;
+
        if (!psp->dtm_context.context.bin_desc.size_bytes ||
            !psp->dtm_context.context.bin_desc.start_addr) {
                dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
@@ -1929,6 +1940,9 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
+       if (!psp->dtm_context.context.initialized)
+               return 0;
+
        return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
 }
 
@@ -2063,6 +2077,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
+       /* bypass securedisplay initialization if dmu is harvested */
+       if (!amdgpu_device_has_display_hardware(psp->adev))
+               return 0;
+
        if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
            !psp->securedisplay_context.context.bin_desc.start_addr) {
                dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8722beba494e563fd45c3e2f3cfba05a7f9d38e8..fc418e670fdae27b699bdbefce8051ab128ab76c 100644
@@ -864,6 +864,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
                amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
                                 gtt->ttm.dma_address, flags);
        }
+       gtt->bound = true;
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b2535023764f494d7ae91a3d0fa15cc89080200b..9c514a606a2f4d7da4697c8fb14e604f239216fc 100644
@@ -60,6 +60,7 @@
 #define FIRMWARE_VCN4_0_4              "amdgpu/vcn_4_0_4.bin"
 #define FIRMWARE_VCN4_0_5              "amdgpu/vcn_4_0_5.bin"
 #define FIRMWARE_VCN4_0_6              "amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1            "amdgpu/vcn_4_0_6_1.bin"
 #define FIRMWARE_VCN5_0_0              "amdgpu/vcn_5_0_0.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -85,6 +86,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
 MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
 
 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -93,14 +95,22 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
 {
        char ucode_prefix[30];
        char fw_name[40];
-       int r;
+       int r, i;
 
-       amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-       r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
-       if (r)
-               amdgpu_ucode_release(&adev->vcn.fw);
+       for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+               amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+               if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==  IP_VERSION(4, 0, 6) &&
+                       i == 1) {
+                       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
+               }
 
+               r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+               if (r) {
+                       amdgpu_ucode_release(&adev->vcn.fw[i]);
+                       return r;
+               }
+       }
        return r;
 }
 
@@ -141,7 +151,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                }
        }
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
        /* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -256,9 +266,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 
                for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+               amdgpu_ucode_release(&adev->vcn.fw[j]);
        }
 
-       amdgpu_ucode_release(&adev->vcn.fw);
        mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
        mutex_destroy(&adev->vcn.vcn_pg_lock);
 
@@ -354,11 +365,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
                        const struct common_firmware_header *hdr;
                        unsigned int offset;
 
-                       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+                       hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
                        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                                if (drm_dev_enter(adev_to_drm(adev), &idx)) {
-                                       memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+                                       memcpy_toio(adev->vcn.inst[i].cpu_addr,
+                                                   adev->vcn.fw[i]->data + offset,
                                                    le32_to_cpu(hdr->ucode_size_bytes));
                                        drm_dev_exit(idx);
                                }
@@ -1043,11 +1055,11 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                const struct common_firmware_header *hdr;
 
-               hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-
                for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                        if (adev->vcn.harvest_config & (1 << i))
                                continue;
+
+                       hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
                        /* currently only support 2 FW instances */
                        if (i >= 2) {
                                dev_info(adev->dev, "More then 2 VCN FW instances!\n");
@@ -1055,7 +1067,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
                        }
                        idx = AMDGPU_UCODE_ID_VCN + i;
                        adev->firmware.ucode[idx].ucode_id = idx;
-                       adev->firmware.ucode[idx].fw = adev->vcn.fw;
+                       adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1985f71b4373b8bac2c40f19894587725c163b3c..a418393d89ec91acda8400622b7d8915bc89c968 100644
@@ -306,7 +306,7 @@ struct amdgpu_vcn_ras {
 struct amdgpu_vcn {
        unsigned                fw_version;
        struct delayed_work     idle_work;
-       const struct firmware   *fw;    /* VCN firmware */
+       const struct firmware   *fw[AMDGPU_MAX_VCN_INSTANCES];  /* VCN firmware */
        unsigned                num_enc_rings;
        enum amd_powergating_state cur_state;
        bool                    indirect_sram;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 70c5cc80ecdc009f3f9e434d5847b0a8736c7ddc..7a65a2b128ec4372c5e6c219c70b9ab110b869fc 100644
@@ -575,9 +575,6 @@ static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
 {
        unsigned int ret;
 
-       if (ring->adev->vpe.collaborate_mode)
-               return ~0;
-
        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 904b9ff5ead2f5d1823689e826b3dd01688851e9..f90905ef32c76d62c3d490445b71388e9e0dc6bb 100644
@@ -3657,6 +3657,9 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
 
 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(10, 1, 10):
                soc15_program_register_sequence(adev,
@@ -4982,7 +4985,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
        u32 tmp;
        int i;
 
-       WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+       if (!amdgpu_sriov_vf(adev))
+               WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
 
        gfx_v10_0_setup_rb(adev);
        gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
@@ -7163,7 +7167,7 @@ static int gfx_v10_0_hw_init(void *handle)
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
                gfx_v10_3_program_pbb_mode(adev);
 
-       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev))
                gfx_v10_3_set_power_brake_sequence(adev);
 
        return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index cd0e8a321e460476d36e4c5b82922b7b2ab3cadb..17509f32f61a4f32b7fff17aeeba6369bdce5689 100644
@@ -155,6 +155,9 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
 {
        uint64_t value;
 
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        /* Program the AGP BAR */
        WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
        WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 16fe428c0722d2577212bfdc4f95554214d68c52..7aed96fa10a9d20bb3e856982e48c7de828cf4ff 100644
@@ -418,6 +418,12 @@ static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
        tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 out:
        return (wptr & ih->ptr_mask);
 }
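
The comment in the hunk above carries the whole fix: WPTR_OVERFLOW_CLEAR is
not self-clearing on this IP, so after acknowledging an overflow the bit must
be written back to 0 or later overflows go undetected. The generic
acknowledge-then-re-arm pattern, with hypothetical register plumbing:

    #include <linux/io.h>

    static void ack_and_rearm_overflow(void __iomem *rb_cntl, u32 clear_bit)
    {
            u32 tmp = readl(rb_cntl);

            writel(tmp | clear_bit, rb_cntl);  /* acknowledge the overflow */
            writel(tmp & ~clear_bit, rb_cntl); /* re-arm detection */
    }
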
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index b3961968c10c4cf84ac76090431b2e9f712e586c..238ea40c245002a6b5af170f564793ee2077f3f7 100644
@@ -99,16 +99,15 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
        switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
        case IP_VERSION(3, 3, 0):
        case IP_VERSION(3, 3, 1):
-               mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+               mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+                           mmhub_client_ids_v3_3[cid][rw] :
+                           cid == 0x140 ? "UMSCH" : NULL;
                break;
        default:
                mmhub_cid = NULL;
                break;
        }
 
-       if (!mmhub_cid && cid == 0x140)
-               mmhub_cid = "UMSCH";
-
        dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
                mmhub_cid ? mmhub_cid : "unknown", cid);
        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
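
For readability, the nested conditional introduced above is equivalent to this
spelled-out lookup (illustrative rewrite only, not the committed form):

    const char *mmhub_cid;

    if (cid < ARRAY_SIZE(mmhub_client_ids_v3_3))
            mmhub_cid = mmhub_client_ids_v3_3[cid][rw]; /* table hit */
    else if (cid == 0x140)
            mmhub_cid = "UMSCH"; /* only known out-of-table client */
    else
            mmhub_cid = NULL; /* reported as "unknown" */
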
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2d904ee72701af9f90f518bfa732d821a0a3b5ad..34237a1b1f2e45c40989c2070bdc0ae071ee0c4b 100644
@@ -431,16 +431,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
        struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 doorbell_offset, doorbell;
        u32 rb_cntl, ib_cntl;
-       int i, unset = 0;
+       int i;
 
        for_each_inst(i, inst_mask) {
                sdma[i] = &adev->sdma.instance[i].ring;
 
-               if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       unset = 1;
-               }
-
                rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
@@ -487,20 +482,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
 static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
                                       uint32_t inst_mask)
 {
-       struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
        int i;
-       bool unset = false;
 
        for_each_inst(i, inst_mask) {
-               sdma[i] = &adev->sdma.instance[i].page;
-
-               if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-                       (!unset)) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       unset = true;
-               }
-
                rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
                                        RB_ENABLE, 0);
@@ -950,13 +935,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
                        r = amdgpu_ring_test_helper(page);
                        if (r)
                                return r;
-
-                       if (adev->mman.buffer_funcs_ring == page)
-                               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                }
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 25ba27151ac0f29498665e86cd7ec0c3b04a31b3..aaceecd558cf9693bc16a528d3be4430be80d5b4 100644
@@ -304,7 +304,7 @@ static int vcn_v1_0_resume(void *handle)
  */
 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
        uint32_t offset;
 
        /* cache window 0: fw */
@@ -371,7 +371,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 
 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
        uint32_t offset;
 
        /* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 18794394c5a052b26ef009647616a8aba92efd9a..e357d8cf0c01540ca3f986b88e2a6e1892df6a34 100644
@@ -330,7 +330,7 @@ static int vcn_v2_0_resume(void *handle)
  */
 static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
        uint32_t offset;
 
        if (amdgpu_sriov_vf(adev))
@@ -386,7 +386,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 
 static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
        uint32_t offset;
 
        /* cache window 0: fw */
@@ -1878,7 +1878,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
 
                init_table += header->vcn_table_offset;
 
-               size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+               size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 
                MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
                        SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index aba403d718065776392dd4e159c1dac5878e232b..1cd8a94b0fbc2319f86f352b3fa6638ec7545d7c 100644
@@ -414,13 +414,15 @@ static int vcn_v2_5_resume(void *handle)
  */
 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size;
        uint32_t offset;
        int i;
 
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
+
+               size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
                /* cache window 0: fw */
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -469,7 +471,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 
 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
        uint32_t offset;
 
        /* cache window 0: fw */
@@ -1240,7 +1242,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
                        SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
                        ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-               size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+               size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
                /* mc resume*/
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        MMSCH_V1_0_INSERT_DIRECT_WT(
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index e02af4de521c6f93c3dd57fa08457edfb2d237eb..8f82fb887e9c20c293b1390bb90c58ea9ff8ceea 100644
@@ -449,7 +449,7 @@ static int vcn_v3_0_resume(void *handle)
  */
 static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
        uint32_t offset;
 
        /* cache window 0: fw */
@@ -499,7 +499,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
 
 static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
-       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+       uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
        uint32_t offset;
 
        /* cache window 0: fw */
@@ -1332,7 +1332,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
                        mmUVD_STATUS),
                        ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-               cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+               cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 8ab01ae919d2e36c8ff1c2226227c173223247be..832d15f7b5f61c0f22a3bf7779cca8108e0087e4 100644
@@ -382,7 +382,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        /* cache window 0: fw */
@@ -442,7 +442,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 {
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        /* cache window 0: fw */
@@ -1289,7 +1289,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
                        regUVD_STATUS),
                        ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-               cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+               cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 810bbfccd6f2eacb0269e554bbfe66b014919c42..203fa988322bdd93a009a2d21a42eb42217d6996 100644
@@ -332,7 +332,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
        uint32_t offset, size, vcn_inst;
        const struct common_firmware_header *hdr;
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        vcn_inst = GET_INST(VCN, inst_idx);
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        /* cache window 0: fw */
@@ -894,7 +894,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
                MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
                        ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-               cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+               cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 0468955338b755f636e60f2943a7719b5e82bd77..501e53e69f2a0ca94076cd5616fbea1646ca4af1 100644
@@ -45,7 +45,7 @@
 #define mmUVD_DPG_LMA_DATA_BASE_IDX                                    regUVD_DPG_LMA_DATA_BASE_IDX
 
 #define VCN_VID_SOC_ADDRESS_2_0                                                0x1fb00
-#define VCN1_VID_SOC_ADDRESS_3_0                                       0x48300
+#define VCN1_VID_SOC_ADDRESS_3_0                                       (0x48300 + 0x38000)
 
 #define VCN_HARVEST_MMSCH                                                      0
 
@@ -329,7 +329,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        /* cache window 0: fw */
@@ -390,7 +390,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        /* cache window 0: fw */
@@ -486,7 +486,8 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 
        /* VCN global tiling registers */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
-               VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+               VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+               adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 /**
@@ -911,7 +912,6 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
                VCN, inst_idx, regUVD_MASTINT_EN),
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
-
        if (indirect)
                amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index d6ee9958ba5fccb758715b8d6ff71cf5e2c7f6f0..bc60c554eb32960e166833d899094dbbb3442ad0 100644
@@ -290,7 +290,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        /* cache window 0: fw */
@@ -351,7 +351,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
 
-       hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+       hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        /* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1c9c6096e28fb335ab9a9c90751a2cfd96b4b5dc..2851719d7121612b64842f3da8e84cf82d073454 100644
@@ -1767,6 +1767,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
                adev->dm.dc->debug.force_subvp_mclk_switch = true;
 
+       if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+               adev->dm.dc->debug.using_dml2 = true;
+
        adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
 
        /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -11271,18 +11274,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
        if (!adev->dm.freesync_module)
                goto update;
 
-       if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-               || sink->sink_signal == SIGNAL_TYPE_EDP) {
+       if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+                    sink->sink_signal == SIGNAL_TYPE_EDP)) {
                bool edid_check_required = false;
 
-               if (edid) {
-                       edid_check_required = is_dp_capable_without_timing_msa(
-                                               adev->dm.dc,
-                                               amdgpu_dm_connector);
+               if (is_dp_capable_without_timing_msa(adev->dm.dc,
+                                                    amdgpu_dm_connector)) {
+                       if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+                               freesync_capable = true;
+                               amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+                               amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+                       } else {
+                               edid_check_required = edid->version > 1 ||
+                                                     (edid->version == 1 &&
+                                                      edid->revision > 1);
+                       }
                }
 
-               if (edid_check_required == true && (edid->version > 1 ||
-                  (edid->version == 1 && edid->revision > 1))) {
+               if (edid_check_required) {
                        for (i = 0; i < 4; i++) {
 
                                timing  = &edid->detailed_timings[i];
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 668f05c8654ef0f3d7e9d5989e76f802aa23247b..bec252e1dd27a98263b5bd3299cc3dfd8ed089e9 100644
@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
        if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
                clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
 
+       /* DPPCLK */
+       dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+                       &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+                       &num_entries_per_clk->num_dppclk_levels);
+       num_levels = num_entries_per_clk->num_dppclk_levels;
+       clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+       //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+       if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+               clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
        if (num_entries_per_clk->num_dcfclk_levels &&
                        num_entries_per_clk->num_dtbclk_levels &&
                        num_entries_per_clk->num_dispclk_levels)
@@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
                                        = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
        }
 
+       for (i = 0; i < num_levels; i++)
+               if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+                       clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
        /* Get UCLK, update bounding box */
        clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5211c1c0f3c0cf0968cae6c168750dc34d7a0e9a..e7dc128f6284b45846f4eff707d3f052dab7b108 100644
@@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required(
        }
 }
 
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+               struct dc_state *context)
+{
+       struct pipe_ctx *opp_head;
+       struct dce_hwseq *hws = dc->hwseq;
+       int i;
+
+       if (!hws->funcs.wait_for_blank_complete)
+               return;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               opp_head = &context->res_ctx.pipe_ctx[i];
+
+               if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+                               dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+                       continue;
+
+               hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+       }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+       struct pipe_ctx *otg_master;
+       struct timing_generator *tg;
+       int i;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               otg_master = &context->res_ctx.pipe_ctx[i];
+               if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+                               dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+                       continue;
+               tg = otg_master->stream_res.tg;
+               if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+                       tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+       }
+
+       /* ODM update may require to reprogram blank pattern for each OPP */
+       wait_for_blank_complete(dc, context);
+}
+
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
        int i;
@@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
                context->stream_count == 0) {
                /* Must wait for no flips to be pending before doing optimize bw */
                wait_for_no_pipes_pending(dc, context);
+               /*
+                * optimized dispclk depends on ODM setup. Need to wait for ODM
+                * update pending complete before optimizing bandwidth.
+                */
+               wait_for_odm_update_pending_complete(dc, context);
                /* pplib is notified if disp_num changed */
                dc->hwss.optimize_bandwidth(dc, context);
                /* Need to do otg sync again as otg could be out of sync due to otg
@@ -3270,6 +3323,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
        if (stream->link->replay_settings.config.replay_supported)
                return true;
 
+       if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+               return true;
+
        return false;
 }
 
@@ -3493,7 +3549,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
                top_pipe_to_program->stream->update_flags.raw = 0;
 }
 
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
 {
 /*
  * This function calls HWSS to wait for any potentially double buffered
@@ -3531,6 +3587,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
                        }
                }
        }
+       wait_for_odm_update_pending_complete(dc, dc_context);
 }
 
 static void commit_planes_for_stream(struct dc *dc,
@@ -4844,22 +4901,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc)
 
 bool dc_dmub_is_ips_idle_state(struct dc *dc)
 {
-       uint32_t idle_state = 0;
-
        if (dc->debug.disable_idle_power_optimizations)
                return false;
 
        if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
                return false;
 
-       if (dc->hwss.get_idle_state)
-               idle_state = dc->hwss.get_idle_state(dc);
-
-       if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
-               !(idle_state & DMUB_IPS2_ALLOW_MASK))
-               return true;
+       if (!dc->ctx->dmub_srv)
+               return false;
 
-       return false;
+       return dc->ctx->dmub_srv->idle_allowed;
 }
 
 /* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 180ac47868c22a68c1af47096db95ecf6b11994c..5cc7f8da209c599f7585e8f10e499ef2118f34ff 100644
@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref)
 
 void dc_state_release(struct dc_state *state)
 {
-       kref_put(&state->refcount, dc_state_free);
+       if (state != NULL)
+               kref_put(&state->refcount, dc_state_free);
 }
 /*
  * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
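
dc_state_release() is now NULL-tolerant, matching the kfree(NULL) convention
so error paths can release unconditionally. A minimal sketch of the idiom with
a hypothetical object type:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref refcount;
    };

    static void obj_free(struct kref *kref)
    {
            kfree(container_of(kref, struct obj, refcount));
    }

    /* Safe to call with o == NULL, like kfree(). */
    static void obj_release(struct obj *o)
    {
            if (o)
                    kref_put(&o->refcount, obj_free);
    }
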
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9900dda2eef5cd2e44e6dbd008cd411194d107af..be2ac5c442a480f868a53667433384a452c65b66 100644
@@ -1085,9 +1085,9 @@ struct replay_settings {
        /* SMU optimization is enabled */
        bool replay_smu_opt_enable;
        /* Current Coasting vtotal */
-       uint16_t coasting_vtotal;
+       uint32_t coasting_vtotal;
        /* Coasting vtotal table */
-       uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+       uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
        /* Maximum link off frame count */
        enum replay_link_off_frame_count_level link_off_frame_count_level;
        /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 48a40dcc7050bd597ed7e5b7d645b0764db2c572..5838a11efd00c4e6cfce8b3d86e9c43413ae4374 100644
@@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
                .opp_set_disp_pattern_generator = NULL,
                .opp_program_dpg_dimensions = NULL,
                .dpg_is_blanked = NULL,
+               .dpg_is_pending = NULL,
                .opp_destroy = opp1_destroy
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d01986610d6be0407ff97318483191ead832..fbf1b6370eb23af3820fa092393229296a49a843 100644
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
                (double_buffer_pending == 0);
 }
 
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+       struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+       uint32_t double_buffer_pending;
+       uint32_t dpg_en;
+
+       REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+       REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+       return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
 void opp2_program_left_edge_extra_pixel (
                struct output_pixel_processor *opp,
                bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
                .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
                .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
                .dpg_is_blanked = opp2_dpg_is_blanked,
+               .dpg_is_pending = opp2_dpg_is_pending,
                .opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
                .opp_destroy = opp1_destroy,
                .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd27dd24c40d5db6977c31d77e7667b14..8f186abd558db45a09dc92776e0b627fb1e0958b 100644
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
 
 bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
 
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
 void opp2_dpg_set_blank_color(
                struct output_pixel_processor *opp,
                const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a4090147217ab58f1795c7bc24030a39..6a71ba3dfc6327969d8f973913fee9c80d6aa387 100644
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
                .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
                .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
                .dpg_is_blanked = opp2_dpg_is_blanked,
+               .dpg_is_pending = opp2_dpg_is_pending,
                .opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
                .opp_destroy = opp1_destroy,
                .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 87760600e154dad46e911e28f0b2937e6e012602..f98def6c8c2d23fc630f2c3d873335f5ad31c4ef 100644
@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
                pipe_cnt++;
        }
 }
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+       if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+               context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b49e1dc9d8ba5154385d4fc396a9c243ec5cfb79..a0a65e0991041d90904c516c7279c5b8aa76967c 100644
@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
                 * - Not TMZ surface
                 */
                if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+                               !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
                                (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
                                dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
                                (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 1ba6933d2b3617aa6d275647d17320dd0755ae69..17a58f41fc6a8501a4cf998507d9b89f68d0fcc1 100644
@@ -824,13 +824,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
 
 static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
 {
+       dml_uint_t width, height;
+
+       if (in->timing.h_addressable > 3840)
+               width = 3840;
+       else
+               width = in->timing.h_addressable;       // 4K max
+
+       if (in->timing.v_addressable > 2160)
+               height = 2160;
+       else
+               height = in->timing.v_addressable;      // 4K max
+
        out->CursorBPP[location] = dml_cur_32bit;
        out->CursorWidth[location] = 256;
 
        out->GPUVMMinPageSizeKBytes[location] = 256;
 
-       out->ViewportWidth[location] = in->timing.h_addressable;
-       out->ViewportHeight[location] = in->timing.v_addressable;
+       out->ViewportWidth[location] = width;
+       out->ViewportHeight[location] = height;
        out->ViewportStationary[location] = false;
        out->ViewportWidthChroma[location] = 0;
        out->ViewportHeightChroma[location] = 0;
@@ -849,7 +861,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
        out->HTapsChroma[location] = 0;
        out->VTapsChroma[location] = 0;
        out->SourceScan[location] = dml_rotation_0;
-       out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+       out->ScalerRecoutWidth[location] = width;
 
        out->LBBitPerPixel[location] = 57;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 2a58a7687bdb5779db6c639d3cbf2277aaf231ae..72cca367062e163be2dbe41f7af452c515467a54 100644
@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
        return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
 }
 
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
 {
-       // Allocate Mode Lib Ctx
-       *dml2 = dml2_allocate_memory();
-
-       if (!(*dml2))
-               return false;
 
        // Store config options
        (*dml2)->config = *config;
@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
        initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
 
        initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+       // Allocate Mode Lib Ctx
+       *dml2 = dml2_allocate_memory();
+
+       if (!(*dml2))
+               return false;
+
+       dml2_init(in_dc, config, dml2);
 
-       /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
-       //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
        return true;
 }
 
@@ -779,3 +783,10 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
 
        return true;
 }
+
+void dml2_reinit(const struct dc *in_dc,
+                                const struct dml2_configuration_options *config,
+                                struct dml2_context **dml2)
+{
+       dml2_init(in_dc, config, dml2);
+}
index ee0eb184eb6d7ea23bec303133cfaa76dc40854a..cc662d682fd4de03f475dfcb59ee68541c09b44c 100644 (file)
@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
        struct dml2_context *src_dml2);
 bool dml2_create_copy(struct dml2_context **dst_dml2,
        struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+                                const struct dml2_configuration_options *config,
+                                struct dml2_context **dml2);
 
 /*
  * dml2_validate - Determines if a display configuration is supported or not.
index c55d5155ecb9c0bd44d35af712c8bb27aac715c4..8b3536c380b8de70f500edef2c91f0169690cd3f 100644 (file)
@@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
                return;
        }
 
+       if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+                       resource_is_odm_topology_changed(new_pipe, old_pipe))
+               /* Detect odm changes */
+               new_pipe->update_flags.bits.odm = 1;
+
        /* Exit on unchanged, unused pipe */
        if (!old_pipe->plane_state && !new_pipe->plane_state)
                return;
@@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
 
        /* Detect top pipe only changes */
        if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
-               /* Detect odm changes */
-               if (resource_is_odm_topology_changed(new_pipe, old_pipe))
-                       new_pipe->update_flags.bits.odm = 1;
-
                /* Detect global sync changes */
                if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
                                || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx(
        DC_LOGGER_INIT(dc->ctx->logger);
        unsigned int prev_hubp_count = 0;
        unsigned int hubp_count = 0;
+       struct pipe_ctx *pipe;
 
        if (resource_is_pipe_topology_changed(dc->current_state, context))
                resource_log_pipe_topology_update(dc, context);
 
        if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
-                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+                       pipe = &context->res_ctx.pipe_ctx[i];
 
-                       if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
-                               ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+                       if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+                               ASSERT(!pipe->plane_state->triplebuffer_flips);
                                /*turn off triple buffer for full update*/
                                dc->hwss.program_triplebuffer(
-                                               dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+                                               dc, pipe, pipe->plane_state->triplebuffer_flips);
                        }
                }
        }
@@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx(
                        DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
                }
 
+       /* update ODM for blanked OTG master pipes */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               pipe = &context->res_ctx.pipe_ctx[i];
+               if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+                               !resource_is_pipe_type(pipe, DPP_PIPE) &&
+                               pipe->update_flags.bits.odm &&
+                               hws->funcs.update_odm)
+                       hws->funcs.update_odm(dc, context, pipe);
+       }
+
        /*
         * Program all updated pipes, order matters for mpcc setup. Start with
         * top pipe and program all pipes that follow in order
         */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+               pipe = &context->res_ctx.pipe_ctx[i];
 
                if (pipe->plane_state && !pipe->top_pipe) {
                        while (pipe) {
@@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx(
                        context->stream_status[0].plane_count > 1) {
                        pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
                }
-
-               /* when dynamic ODM is active, pipes must be reconfigured when all planes are
-                * disabled, as some transitions will leave software and hardware state
-                * mismatched.
-                */
-               if (dc->debug.enable_single_display_2to1_odm_policy &&
-                       pipe->stream &&
-                       pipe->update_flags.bits.disable &&
-                       !pipe->prev_odm_pipe &&
-                       hws->funcs.update_odm)
-                       hws->funcs.update_odm(dc, context, pipe);
        }
 }
 
@@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete(
        int counter;
 
        for (counter = 0; counter < 1000; counter++) {
-               if (opp->funcs->dpg_is_blanked(opp))
+               if (!opp->funcs->dpg_is_pending(opp))
                        break;
 
                udelay(100);
@@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete(
                return false;
        }
 
-       return true;
+       return opp->funcs->dpg_is_blanked(opp);
 }
 
 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
index 7e6b7f2a6dc9ea799c3a4b0a23b5d3e4c6d26b17..8bc3d01537bbd493d08d294b2a8d88104c95b21c 100644 (file)
@@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
        if (pipe_ctx == NULL)
                return;
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
                pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
                                pipe_ctx->stream_res.stream_enc,
                                enable);
+
+               /* Wait for two frames to make sure AV mute is sent out */
+               if (enable) {
+                       pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+                       pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+                       pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+                       pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+                       pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+               }
+       }
 }
 
 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
index aa36d7a56ca8c3b6f3cd47e67455ba67549bf73b..c0b526cf178654f1c9fedb95e08831e965a911a7 100644 (file)
@@ -1156,6 +1156,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
                        dsc->funcs->dsc_disconnect(dsc);
                }
        }
+
+       if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+               /*
+                * the blank pattern is generated by the OPP; reprogram it
+                * since the OPP count has changed
+                */
+               dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
 }
 
 unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1778,3 +1785,23 @@ void dcn32_prepare_bandwidth(struct dc *dc,
                context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
        }
 }
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+               struct dc_state *context, bool lock)
+{
+       unsigned int i;
+       struct pipe_ctx *pipe;
+       struct timing_generator *tg;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               pipe = &context->res_ctx.pipe_ctx[i];
+               tg = pipe->stream_res.tg;
+
+               if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+                               !tg->funcs->is_tg_enabled(tg) ||
+                               dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+                       continue;
+
+               dc->hwss.pipe_control_lock(dc, pipe, lock);
+       }
+}
index 069e20bc87c0a75af028168253219fc9343b1af3..f55c11fc56ec7a7a3d86cff32269a89fb6c5a3d5 100644 (file)
@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
 void dcn32_prepare_bandwidth(struct dc *dc,
        struct dc_state *context);
 
+void dcn32_interdependent_update_lock(struct dc *dc,
+               struct dc_state *context, bool lock);
 #endif /* __DC_HWSS_DCN32_H__ */
index 2b073123d3ede2eb16ae766af9a285511fc6e8ea..67d661dbd5b7c7c20673424102b139c8df47d9be 100644 (file)
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
        .disable_plane = dcn20_disable_plane,
        .disable_pixel_data = dcn20_disable_pixel_data,
        .pipe_control_lock = dcn20_pipe_control_lock,
-       .interdependent_update_lock = dcn10_lock_all_pipes,
+       .interdependent_update_lock = dcn32_interdependent_update_lock,
        .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn32_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
index aee5372e292c5a691c9b6f2d8a45ef31e823889d..d89c92370d5b3a773f524ccfdeeb3d0d5691af63 100644 (file)
@@ -337,6 +337,8 @@ struct opp_funcs {
        bool (*dpg_is_blanked)(
                        struct output_pixel_processor *opp);
 
+       bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
        void (*opp_dpg_set_blank_color)(
                        struct output_pixel_processor *opp,
                        const struct tg_color *color);
index d98d72f35be5bd3eb28f8db759c6f8848bf5fa85..ffad8fe16c54dc2447f5846c8788387e024c5d71 100644 (file)
@@ -331,6 +331,7 @@ struct timing_generator_funcs {
 
        void (*init_odm)(struct timing_generator *tg);
        void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+       void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
 };
 
 #endif
index 26fe81f213da55d39aa7edbb387328192d42685b..bf29fc58ea6a62af45d0643b497cc54a779c0a8b 100644 (file)
@@ -285,12 +285,12 @@ struct link_service {
                        enum replay_FW_Message_type msg,
                        union dmub_replay_cmd_set *cmd_data);
        bool (*edp_set_coasting_vtotal)(
-                       struct dc_link *link, uint16_t coasting_vtotal);
+                       struct dc_link *link, uint32_t coasting_vtotal);
        bool (*edp_replay_residency)(const struct dc_link *link,
                        unsigned int *residency, const bool is_start,
                        const bool is_alpm);
        bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
-                       const unsigned int *power_opts, uint16_t coasting_vtotal);
+                       const unsigned int *power_opts, uint32_t coasting_vtotal);
 
        bool (*edp_wait_for_t12)(struct dc_link *link);
        bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
index acfbbc638cc647668ad6cfa86625e18b294ceb82..3baa2bdd6dd652c919f46529d244ce89b4c9c65c 100644 (file)
@@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
        return true;
 }
 
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
 {
        struct dc *dc = link->ctx->dc;
        struct dmub_replay *replay = dc->res_pool->replay;
@@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link,
 }
 
 bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
-       const unsigned int *power_opts, uint16_t coasting_vtotal)
+       const unsigned int *power_opts, uint32_t coasting_vtotal)
 {
        struct dc  *dc = link->ctx->dc;
        struct dmub_replay *replay = dc->res_pool->replay;
index 34e521af7bb482260539bcf66821741531ab17af..a158c6234d4225e6f665a8082ad351e5ad809d16 100644 (file)
@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
 bool edp_send_replay_cmd(struct dc_link *link,
                        enum replay_FW_Message_type msg,
                        union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
 bool edp_replay_residency(const struct dc_link *link,
        unsigned int *residency, const bool is_start, const bool is_alpm);
 bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
 bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
-       const unsigned int *power_opts, uint16_t coasting_vtotal);
+       const unsigned int *power_opts, uint32_t coasting_vtotal);
 bool edp_wait_for_t12(struct dc_link *link);
 bool edp_is_ilr_optimization_required(struct dc_link *link,
        struct dc_crtc_timing *crtc_timing);
index ab81594a7fadcc0ea6eecb148ddc264202a1c0df..6c2e84d3967fc552a6a5ee346d5731cfdd920098 100644 (file)
@@ -557,7 +557,8 @@ struct dcn_optc_registers {
        type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
        type OTG_CRC_DATA_FORMAT;\
        type OTG_V_TOTAL_LAST_USED_BY_DRR;\
-       type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+       type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+       type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
 
 #define TG_REG_FIELD_LIST_DCN3_2(type) \
        type OTG_H_TIMING_DIV_MODE_MANUAL;
index 82349354332548e160494c23bee15acaa18b7630..f07a4c7e48bc23ed0d2351aef46ef38907ee265f 100644 (file)
@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
        }
 }
 
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+       REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
 void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -345,6 +352,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
                .set_odm_bypass = optc32_set_odm_bypass,
                .set_odm_combine = optc32_set_odm_combine,
                .get_odm_combine_segments = optc32_get_odm_combine_segments,
+               .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
                .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
                .get_optc_source = optc2_get_optc_source,
                .set_out_mux = optc3_set_out_mux,
index 8ce3b178cab06513fffc93bf3ef9b84d288f505b..0c2c14695561961212aff800f100ac7160d04ad3 100644 (file)
@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
 void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
 void optc32_set_odm_bypass(struct timing_generator *optc,
                const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
 
 #endif /* __DC_OPTC_DCN32_H__ */
index 3f3951f3ba9834eb5ba4a9599a9f34051ed87f50..ce1754cc1f4631bcb800891ab7bd74ad52a36a69 100644 (file)
@@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
        dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
 
        dcn32_override_min_req_memclk(dc, context);
+       dcn32_override_min_req_dcfclk(dc, context);
 
        BW_VAL_TRACE_END_WATERMARKS();
 
@@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
 {
        DC_FP_START();
        dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+       if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+               dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
        DC_FP_END();
 }
 
index 0c87b0fabba7d96ff38180900e41f1438419912c..2258c5c7212d86902b4352fbf0fe3da01b524c78 100644 (file)
@@ -42,6 +42,7 @@
 #define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
 #define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
 
 #define TO_DCN32_RES_POOL(pool)\
        container_of(pool, struct dcn32_resource_pool, base)
@@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 
 void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
 
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
 /* definitions for run time init of reg offsets */
 
 /* CLK SRC */
index b356fed1726d92dea75ac98f5669896cb4dcca80..296a0a8e71459f79f8478569287bd11dcae50cad 100644 (file)
@@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
 {
        DC_FP_START();
        dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+       if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+               dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
        DC_FP_END();
 }
 
index a529e369b2ace9b8e28254e0a36cd02106ccab84..af3fe8bb0728b114a735faddfb68ae38360af334 100644 (file)
@@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
         * Currently the support is only for 0 or 1
         */
        uint8_t panel_inst;
+       /**
+        * 16-bit value dictated by the driver that holds the upper 16 bits of the coasting vtotal.
+        */
+       uint16_t coasting_vtotal_high;
+       /**
+        * Explicit padding to 4 byte boundary.
+        */
+       uint8_t pad[2];
 };
 
 /**
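
The widened coasting vtotal is 32 bits, but the DMUB command layout stays ABI-compatible: the new coasting_vtotal_high member carries the upper half, while the struct's pre-existing 16-bit coasting_vtotal field (not visible in this hunk, assumed from the naming) keeps the lower half. A minimal sketch of the packing under that assumption:

static void pack_coasting_vtotal(struct dmub_cmd_replay_set_coasting_vtotal_data *data,
                                 uint32_t coasting_vtotal)
{
        /* lower half into the assumed pre-existing 16-bit field */
        data->coasting_vtotal = coasting_vtotal & 0xFFFF;
        /* upper half into the field added above */
        data->coasting_vtotal_high = (coasting_vtotal >> 16) & 0xFFFF;
}
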
index 8c137d7c032e1ffa5d4e2ac288424ba4edba3008..7c9805705fd3804f15c4d297dbeceb6cf05656de 100644 (file)
@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
        hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
+       if (!display)
+               return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
        hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
 
        if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
index e304e8435fb8f1c5e29428f72c20a6097fb57697..2a3698fd2dc242e6927d1c9c399197f1ccb3f40c 100644 (file)
@@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
 
 void set_replay_coasting_vtotal(struct dc_link *link,
        enum replay_coasting_vtotal_type type,
-       uint16_t vtotal)
+       uint32_t vtotal)
 {
        link->replay_settings.coasting_vtotal_table[type] = vtotal;
 }
index bef4815e1703d78cdebc6f49bc160932d08c5272..ff7e6f3cd6be230b95755188043e76b8041db121 100644 (file)
@@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
 void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
 void set_replay_coasting_vtotal(struct dc_link *link,
        enum replay_coasting_vtotal_type type,
-       uint16_t vtotal);
+       uint32_t vtotal);
 void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
 void calculate_replay_link_off_frame_count(struct dc_link *link,
        uint16_t vtotal, uint16_t htotal);
index 1d96eb274d72d462de9330166a01d1acc881b2ad..0c2d04f978ac92257d78a184786e48c837e38b7f 100644 (file)
@@ -1283,10 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
                                        uint32_t *max_power_limit,
                                        uint32_t *min_power_limit)
 {
-       struct smu_11_0_powerplay_table *powerplay_table =
-               (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
        PPTable_t *pptable = smu->smu_table.driver_pptable;
-       uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t power_limit;
 
        if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
                /* the last hope to figure out the ppt limit */
@@ -1302,26 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu,
                *current_power_limit = power_limit;
        if (default_power_limit)
                *default_power_limit = power_limit;
-
-       if (smu->od_enabled)
-               od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-       else
-               od_percent_upper = 0;
-
-       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-
-       dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
-                                                       od_percent_upper, od_percent_lower, power_limit);
-
-       if (max_power_limit) {
-               *max_power_limit = power_limit * (100 + od_percent_upper);
-               *max_power_limit /= 100;
-       }
-
-       if (min_power_limit) {
-               *min_power_limit = power_limit * (100 - od_percent_lower);
-               *min_power_limit /= 100;
-       }
+       if (max_power_limit)
+               *max_power_limit = power_limit;
+       if (min_power_limit)
+               *min_power_limit = power_limit;
 
        return 0;
 }
index ed189a3878ebe7199833e495f45417461897a93a..836b1df7992862614d017c0dd4d919f0f80a3965 100644 (file)
@@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu,
                (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
        struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
        PPTable_t *pptable = smu->smu_table.driver_pptable;
-       uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
 
        if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
                /* the last hope to figure out the ppt limit */
@@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled &&
-                   navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
-               od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-       else
-               od_percent_upper = 0;
-
-       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+       if (powerplay_table) {
+               if (smu->od_enabled &&
+                           navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+                       od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+               } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+                       od_percent_upper = 0;
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+               }
+       }
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
index e2ad2b972ab0b3550d7aceb66e632eb372a0ffc5..1f18b61884f3f2ae8c3f4415b571329659b7a52c 100644 (file)
@@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
        return throttler_status;
 }
 
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+                                                  enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+       return od_table->cap[cap];
+}
+
 static int sienna_cichlid_get_power_limit(struct smu_context *smu,
                                          uint32_t *current_power_limit,
                                          uint32_t *default_power_limit,
@@ -625,7 +631,8 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
 {
        struct smu_11_0_7_powerplay_table *powerplay_table =
                (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
-       uint32_t power_limit, od_percent_upper, od_percent_lower;
+       struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+       uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
        uint16_t *table_member;
 
        GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled)
-               od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
-       else
-               od_percent_upper = 0;
-
-       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+       if (powerplay_table) {
+               if (smu->od_enabled &&
+                               sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+                       od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+               } else if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+                       od_percent_upper = 0;
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+               }
+       }
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
@@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
        return dpm_desc->SnapToDiscrete == 0;
 }
 
-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
-                                                  enum SMU_11_0_7_ODFEATURE_CAP cap)
-{
-       return od_table->cap[cap];
-}
-
 static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
                                                enum SMU_11_0_7_ODSETTING_ID setting,
                                                uint32_t *min, uint32_t *max)
index 9b80f18ea6c359f279f050ee9f645b92dd43d057..9c03296f92cdd41c868406dfd861bf56a77c2e81 100644 (file)
@@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
                (struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
        PPTable_t *pptable = table_context->driver_pptable;
        SkuTable_t *skutable = &pptable->SkuTable;
-       uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
        uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
        if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2369,12 +2369,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled)
-               od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
-       else
-               od_percent_upper = 0;
-
-       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+       if (powerplay_table) {
+               if (smu->od_enabled &&
+                               smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+                       od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+               } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+                       od_percent_upper = 0;
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+               }
+       }
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
index 3dc7b60cb0754d0f62fd3cead74f1553071b8597..7318964f1f148fae680df028b0ae9c1cc74cb616 100644 (file)
@@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
                (struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
        PPTable_t *pptable = table_context->driver_pptable;
        SkuTable_t *skutable = &pptable->SkuTable;
-       uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
        uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
        if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2333,12 +2333,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
        if (default_power_limit)
                *default_power_limit = power_limit;
 
-       if (smu->od_enabled)
-               od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
-       else
-               od_percent_upper = 0;
-
-       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+       if (powerplay_table) {
+               if (smu->od_enabled &&
+                               smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+                       od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+               } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+                       od_percent_upper = 0;
+                       od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+               }
+       }
 
        dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
                                        od_percent_upper, od_percent_lower, power_limit);
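
The SMU backends above converge on one pattern: both OD percentages start at zero, and the overdrive table is consulted only when the powerplay table exists and the power-limit OD capability is reported. A condensed sketch of that shape with stand-in names (od_supported, od_max and od_min are placeholders, not real symbols):

uint32_t od_percent_upper = 0, od_percent_lower = 0;

if (powerplay_table) {
        if (smu->od_enabled && od_supported) {
                /* OD active: both bounds come from the overdrive table */
                od_percent_upper = le32_to_cpu(od_max);
                od_percent_lower = le32_to_cpu(od_min);
        } else if (od_supported) {
                /* OD capable but disabled: no headroom above the default */
                od_percent_upper = 0;
                od_percent_lower = le32_to_cpu(od_min);
        }
}
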
index e7c4bef74aa46a37d0e1b4e87a78eee6f1c94b42..4b2ae27f0a57f2461a04f293a540c130a0fd17ee 100644 (file)
@@ -441,23 +441,21 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
 static int lt8912_connector_get_modes(struct drm_connector *connector)
 {
        const struct drm_edid *drm_edid;
-       int ret = -1;
-       int num = 0;
        struct lt8912 *lt = connector_to_lt8912(connector);
        u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+       int ret, num;
 
        drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
        drm_edid_connector_update(connector, drm_edid);
-       if (drm_edid) {
-               num = drm_edid_connector_add_modes(connector);
-       } else {
-               return ret;
-       }
+       if (!drm_edid)
+               return 0;
+
+       num = drm_edid_connector_add_modes(connector);
 
        ret = drm_display_info_set_bus_formats(&connector->display_info,
                                               &bus_format, 1);
-       if (ret)
-               num = ret;
+       if (ret < 0)
+               num = 0;
 
        drm_edid_free(drm_edid);
        return num;
index bcf8bccd86d6cc0a90f8bbba7a9cc76a64b3803b..f4f593ad8f79574604401eeb2bd819e5a3acbc7f 100644 (file)
@@ -294,8 +294,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
 static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
 {
        struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
-       unsigned int count;
        const struct drm_edid *drm_edid;
+       int count;
 
        drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
        drm_edid_connector_update(connector, drm_edid);
index e814020bbcd3b3275d71174fe907c18ffeddefe0..cfbe020de54e0143162f7fea369a3bb458a50f13 100644 (file)
@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
  * The modes probed from the panel are automatically added to the connector
  * that the panel is attached to.
  *
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
  */
 int drm_panel_get_modes(struct drm_panel *panel,
                        struct drm_connector *connector)
 {
        if (!panel)
-               return -EINVAL;
+               return 0;
 
-       if (panel->funcs && panel->funcs->get_modes)
-               return panel->funcs->get_modes(panel, connector);
+       if (panel->funcs && panel->funcs->get_modes) {
+               int num;
 
-       return -EOPNOTSUPP;
+               num = panel->funcs->get_modes(panel, connector);
+               if (num > 0)
+                       return num;
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_panel_get_modes);
 
index 4d60cc810b577324f4c504ddadba2a23821fabd4..bf2dd1f46b6c4f17577c849a375114443a094a28 100644 (file)
@@ -422,6 +422,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
 
        count = connector_funcs->get_modes(connector);
 
+       /* The .get_modes() callback should not return negative values. */
+       if (count < 0) {
+               drm_err(connector->dev, ".get_modes() returned %pe\n",
+                       ERR_PTR(count));
+               count = 0;
+       }
+
        /*
         * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
         * override/firmware EDID.
index ca31bad6c5760dd9429df203a8a3d1c20d722a9f..f48c4343f4690f6dc288d1d8a1185a604d3c9152 100644 (file)
@@ -74,16 +74,15 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
 {
        struct exynos_dp_device *dp = to_dp(plat_data);
        struct drm_display_mode *mode;
-       int num_modes = 0;
 
        if (dp->plat_data.panel)
-               return num_modes;
+               return 0;
 
        mode = drm_mode_create(connector->dev);
        if (!mode) {
                DRM_DEV_ERROR(dp->dev,
                              "failed to create a new display mode.\n");
-               return num_modes;
+               return 0;
        }
 
        drm_display_mode_from_videomode(&dp->vm, mode);
@@ -94,7 +93,7 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
        drm_mode_set_name(mode);
        drm_mode_probed_add(connector, mode);
 
-       return num_modes + 1;
+       return 1;
 }
 
 static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
index 00382f28748ac0843653e1e1f1cf749590047468..f5bbba9ad225263ab72bc78aa696a6193794f1c6 100644 (file)
@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
         */
        if (!ctx->raw_edid) {
                DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
-               return -EFAULT;
+               return 0;
        }
 
        edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
        edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
        if (!edid) {
                DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
-               return -ENOMEM;
+               return 0;
        }
 
        drm_connector_update_edid_property(connector, edid);
index 43bed6cbaaea072e7c8860692739d4873a4d2c39..b1d02dec3774d095243597999359fde10fd9ad12 100644 (file)
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
        int ret;
 
        if (!hdata->ddc_adpt)
-               return -ENODEV;
+               return 0;
 
        edid = drm_get_edid(connector, hdata->ddc_adpt);
        if (!edid)
-               return -ENODEV;
+               return 0;
 
        hdata->dvi_mode = !connector->display_info.is_hdmi;
        DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
index 70349739dd89bed615e221fb00900fc4b53b496d..55dedd73f528c8842941a96f53eb82d4c2a863dc 100644 (file)
@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
                int ret;
 
                if (!mode)
-                       return -EINVAL;
+                       return 0;
 
                ret = of_get_drm_display_mode(np, &imxpd->mode,
                                              &imxpd->bus_flags,
                                              OF_USE_NATIVE_MODE);
                if (ret) {
                        drm_mode_destroy(connector->dev, mode);
-                       return ret;
+                       return 0;
                }
 
                drm_mode_copy(mode, &imxpd->mode);
index 56dcd25db1ce2c79b8a6be7c0ea684838e74e655..db8cbf6151129d63fd24e42626dfd10e4c84bb33 100644 (file)
@@ -1256,6 +1256,8 @@ out:
                        drm_vma_node_unmap(&nvbo->bo.base.vma_node,
                                           bdev->dev_mapping);
                        nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+                       nvbo->bo.resource->bus.offset = 0;
+                       nvbo->bo.resource->bus.addr = NULL;
                        goto retry;
                }
 
index 666eb93b1742ca5435cf0567e28e1664122bad8b..11b4c9c274a1a597cb3592019d873345c241d1cd 100644 (file)
@@ -41,7 +41,6 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
 
        rm->dtor = r535_devinit_dtor;
        rm->post = hw->post;
-       rm->disable = hw->disable;
 
        ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
        if (ret)
index a73a5b58979045b07468c1443940f87e1b151f67..9994cbd6f1c40c0c798498687f4f5d7168e883c5 100644 (file)
@@ -1430,6 +1430,10 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
 
 /**
  * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
  *
  * The GSP sequencer is a list of I/O commands that the GSP can send to
  * the driver to perform for various purposes.  The most common usage is to
@@ -1781,6 +1785,7 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
 
 /**
  * r535_gsp_libos_init() -- create the libos arguments structure
+ * @gsp: gsp pointer
  *
  * The logging buffers are byte queues that contain encoded printf-like
  * messages from GSP-RM.  They need to be decoded by a special application
@@ -1920,6 +1925,10 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
 
 /**
  * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
+ * @gsp: gsp pointer
+ * @sgt: S/G list to traverse
+ * @size: size of the image, in bytes
+ * @rx3: radix3 array to update
  *
  * The GSP uses a three-level page table, called radix3, to map the firmware.
  * Each 64-bit "pointer" in the table is either the bus address of an entry in
index 69001a3dc0df23380d4e5f3a3c973b9c73ba3065..2d1880c61b50d1316c3119c0d2fa2ac733d3396b 100644 (file)
@@ -166,7 +166,7 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
                                 unsigned long long clock)
 {
        const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
-       unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */
+       unsigned long diff = div_u64(clock, 200); /* +-0.5% allowed by HDMI spec */
        long rounded_rate;
 
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
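
The div_u64() change matters because clock is 64 bits wide: on 32-bit architectures a plain 64-bit division makes the compiler emit a call into libgcc (e.g. __udivdi3), which the kernel does not provide, so the build fails at link time. div_u64() from <linux/math64.h> is the portable idiom; an illustration only, not code from this driver:

#include <linux/math64.h>

unsigned long long clock = 594000000ULL;        /* e.g. a 4k60 pixel clock */
unsigned long diff = div_u64(clock, 200);       /* 0.5%, links on 32-bit too */
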
index 34f807ed1c315e3cb0cd53a59d34eb36c3bceec4..d8751ea2030329ccd1cc7ab4b72dd7b9fe63538f 100644 (file)
@@ -509,7 +509,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
        edid = drm_get_edid(connector, vc4_hdmi->ddc);
        cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
        if (!edid)
-               return -ENODEV;
+               return 0;
 
        drm_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
index 952496c6260dfb5c3e9f798137cd4737a4040acb..826c8b389672502dfebd6e89c6c1997bf8f0c9a2 100644 (file)
@@ -235,6 +235,29 @@ retry:
                        goto err_unlock_list;
        }
 
+       if (!args->num_batch_buffer) {
+               err = xe_vm_lock(vm, true);
+               if (err)
+                       goto err_unlock_list;
+
+               if (!xe_vm_in_lr_mode(vm)) {
+                       struct dma_fence *fence;
+
+                       fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
+                       if (IS_ERR(fence)) {
+                               err = PTR_ERR(fence);
+                               goto err_unlock_list;
+                       }
+                       for (i = 0; i < num_syncs; i++)
+                               xe_sync_entry_signal(&syncs[i], NULL, fence);
+                       xe_exec_queue_last_fence_set(q, vm, fence);
+                       dma_fence_put(fence);
+               }
+
+               xe_vm_unlock(vm);
+               goto err_unlock_list;
+       }
+
        vm_exec.vm = &vm->gpuvm;
        vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
        if (xe_vm_in_lr_mode(vm)) {
@@ -254,24 +277,6 @@ retry:
                goto err_exec;
        }
 
-       if (!args->num_batch_buffer) {
-               if (!xe_vm_in_lr_mode(vm)) {
-                       struct dma_fence *fence;
-
-                       fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
-                       if (IS_ERR(fence)) {
-                               err = PTR_ERR(fence);
-                               goto err_exec;
-                       }
-                       for (i = 0; i < num_syncs; i++)
-                               xe_sync_entry_signal(&syncs[i], NULL, fence);
-                       xe_exec_queue_last_fence_set(q, vm, fence);
-                       dma_fence_put(fence);
-               }
-
-               goto err_exec;
-       }
-
        if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
                err = -EWOULDBLOCK;     /* Aliased to -EAGAIN */
                skip_retry = true;
index 73c535193a984b385f65986c9b1c0d3b79f1f5b4..241c294270d9167f25d1898f8f590c7aabb06ca0 100644 (file)
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
 static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
        return BIT(tile->id) & vma->tile_present &&
-               !(BIT(tile->id) & vma->usm.tile_invalidated);
+               !(BIT(tile->id) & vma->tile_invalidated);
 }
 
 static bool vma_matches(struct xe_vma *vma, u64 page_addr)
@@ -226,7 +226,7 @@ retry_userptr:
 
        if (xe_vma_is_userptr(vma))
                ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
-       vma->usm.tile_invalidated &= ~BIT(tile->id);
+       vma->tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
        drm_exec_fini(&exec);
index 4ddc55527f9ab3e632635c5f920d4f4420df1255..846f14507d5ff2ed47ff8c0c4dac573e584ade89 100644 (file)
@@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
             TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
 );
index d28260351af2e330c16dbbf94609f19b285d1806..f88faef4142bde018f336d33d3e2eed726a4bc29 100644 (file)
@@ -708,6 +708,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
        int err = 0;
        LIST_HEAD(tmp_evict);
 
+       xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
        lockdep_assert_held_write(&vm->lock);
 
        /* Collect invalidated userptrs */
@@ -724,11 +725,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
        list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
                                 userptr.repin_link) {
                err = xe_vma_userptr_pin_pages(uvma);
-               if (err < 0)
-                       return err;
+               if (err == -EFAULT) {
+                       list_del_init(&uvma->userptr.repin_link);
 
-               list_del_init(&uvma->userptr.repin_link);
-               list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+                       /* Wait for pending binds */
+                       xe_vm_lock(vm, false);
+                       dma_resv_wait_timeout(xe_vm_resv(vm),
+                                             DMA_RESV_USAGE_BOOKKEEP,
+                                             false, MAX_SCHEDULE_TIMEOUT);
+
+                       err = xe_vm_invalidate_vma(&uvma->vma);
+                       xe_vm_unlock(vm);
+                       if (err)
+                               return err;
+               } else {
+                       if (err < 0)
+                               return err;
+
+                       list_del_init(&uvma->userptr.repin_link);
+                       list_move_tail(&uvma->vma.combined_links.rebind,
+                                      &vm->rebind_list);
+               }
        }
 
        return 0;
@@ -2024,7 +2041,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                        return err;
        }
 
-       if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+       if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
                return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
                                  true, first_op, last_op);
        } else {
@@ -3214,9 +3231,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
        u8 id;
        int ret;
 
-       xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
        xe_assert(xe, !xe_vma_is_null(vma));
-       trace_xe_vma_usm_invalidate(vma);
+       trace_xe_vma_invalidate(vma);
 
        /* Check that we don't race with page-table updates */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
@@ -3254,7 +3270,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
                }
        }
 
-       vma->usm.tile_invalidated = vma->tile_mask;
+       vma->tile_invalidated = vma->tile_mask;
 
        return 0;
 }
index 79b5cab57711995a6dc3938f5269d73efddee95e..ae5fb565f6bf48d52e29c811a8333793e4e128fd 100644 (file)
@@ -84,11 +84,8 @@ struct xe_vma {
                struct work_struct destroy_work;
        };
 
-       /** @usm: unified shared memory state */
-       struct {
-               /** @tile_invalidated: VMA has been invalidated */
-               u8 tile_invalidated;
-       } usm;
+       /** @tile_invalidated: VMA has been invalidated */
+       u8 tile_invalidated;
 
        /** @tile_mask: Tile mask of where to create binding for this VMA */
        u8 tile_mask;
index 079cc283a18667b4eb0dcbf3fa021d1db09d7b32..c5f6b5a5d1176e516b4dd23db45ce736e04bb5d0 100644 (file)
@@ -111,8 +111,10 @@ void xe_vram_freq_sysfs_init(struct xe_tile *tile)
                return;
 
        kobj = kobject_create_and_add("memory", tile->sysfs);
-       if (!kobj)
+       if (!kobj) {
                drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
+               return;
+       }
 
        err = sysfs_create_group(kobj, &freq_group_attrs);
        if (err) {
index 3606e1a7f965b7ae3d5ac0acc2b5d1a68bbfee38..4baca0d9107b059a79e15949f0e831128cb0a573 100644 (file)
@@ -541,7 +541,7 @@ struct drm_bridge_funcs {
         * The @get_modes callback is mostly intended to support non-probeable
         * displays such as many fixed panels. Bridges that support reading
         * EDID shall leave @get_modes unimplemented and implement the
-        * &drm_bridge_funcs->get_edid callback instead.
+        * &drm_bridge_funcs->edid_read callback instead.
         *
         * This callback is optional. Bridges that implement it shall set the
         * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
@@ -687,7 +687,7 @@ enum drm_bridge_ops {
        /**
         * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
         * connected to its output. Bridges that set this flag shall implement
-        * the &drm_bridge_funcs->get_edid callback.
+        * the &drm_bridge_funcs->edid_read callback.
         */
        DRM_BRIDGE_OP_EDID = BIT(1),
        /**
index 0c9f917a4d4be956d50c587a8c7b90011335c9ce..81572d32db0c2bf7c6e2720f4e09d346b0b1a792 100644 (file)
@@ -71,7 +71,6 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 }
 
 #define DRM_FIXED_POINT                32
-#define DRM_FIXED_POINT_HALF   16
 #define DRM_FIXED_ONE          (1ULL << DRM_FIXED_POINT)
 #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
 #define DRM_FIXED_DIGITS_MASK  (~DRM_FIXED_DECIMAL_MASK)
@@ -90,7 +89,7 @@ static inline int drm_fixp2int(s64 a)
 
 static inline int drm_fixp2int_round(s64 a)
 {
-       return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
+       return drm_fixp2int(a + DRM_FIXED_ONE / 2);
 }
 
 static inline int drm_fixp2int_ceil(s64 a)
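
Why the new bias is correct: the removed DRM_FIXED_POINT_HALF was 16, so the old code added 1 << 15, which in 32.32 fixed point is only 2^-17 and rounds essentially nothing (2.5 still truncated to 2). One half in this format is DRM_FIXED_ONE / 2, i.e. 1ULL << 31. A worked check, as a sketch rather than source code:

s64 a = (2LL << 32) | (1LL << 31);      /* 2.5 in 32.32 fixed point */

/* old: drm_fixp2int(a + (1 << 15))         -> 2, bias of only 2^-17 */
/* new: drm_fixp2int(a + DRM_FIXED_ONE / 2) -> 3, bias of exactly 0.5 */
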
index 881b03e4dc2882844eb641b23ea34a9e10101d27..9ed42469540eb63e6a45d138ee2a51ff41d91b81 100644 (file)
@@ -898,7 +898,8 @@ struct drm_connector_helper_funcs {
         *
         * RETURNS:
         *
-        * The number of modes added by calling drm_mode_probed_add().
+        * The number of modes added by calling drm_mode_probed_add(). Return 0
+        * on failure (no modes) instead of a negative error code.
         */
        int (*get_modes)(struct drm_connector *connector);
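
A hypothetical connector driver honoring this contract, in the same shape as the probe fixes above (the foo_ name is invented and connector->ddc is assumed to be populated):

#include <linux/slab.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static int foo_connector_get_modes(struct drm_connector *connector)
{
        struct edid *edid;
        int count;

        edid = drm_get_edid(connector, connector->ddc);
        if (!edid)
                return 0;       /* probe failed: zero modes, not a negative errno */

        drm_connector_update_edid_property(connector, edid);
        count = drm_add_edid_modes(connector, edid);
        kfree(edid);

        return count;
}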