config DRM_KUNIT_TEST_HELPERS
tristate
depends on DRM && KUNIT
+ select DRM_KMS_HELPER
help
KUnit Helpers for KMS drivers.
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
select PRIME_NUMBERS
{
int ret;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete || adev->kfd.client.dev)
return 0;
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
mutex_lock(&process_info->lock);
- drm_exec_init(&exec, 0, 0);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("Locking VM PD failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
/* Reserve all BOs and page tables/directory. Add all BOs from
gobj = &mem->bo->tbo.base;
ret = drm_exec_prepare_obj(&exec, gobj, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
}
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
- if (ret)
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
+ }
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
- if (!adev->reset_domain) {
- r = -ENOMEM;
- goto unmap_memory;
- }
+ if (!adev->reset_domain)
+ return -ENOMEM;
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- goto unmap_memory;
+ return r;
}
amdgpu_device_set_mcbp(adev);
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- goto unmap_memory;
+ return r;
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
- goto unmap_memory;
+ return r;
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
if (adev->gmc.xgmi.supported) {
r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
- goto unmap_memory;
+ return r;
}
/* enable PCIE atomic ops */
failed:
amdgpu_vf_error_trans_all(adev);
-unmap_memory:
- iounmap(adev->rmmio);
return r;
}
}
for (i = 0; i < mgpu_info.num_dgpu; i++) {
adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_amdkfd_drm_client_create(adev);
+ }
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
}
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ DRM_ERROR("KGQ enable failed\n");
return r;
}
*/
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
+ int r;
+
if (bo->kfd_bo)
- return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
addr, amdgpu_bo_size(bo),
&amdgpu_hmm_hsa_ops);
- return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
- amdgpu_bo_size(bo),
- &amdgpu_hmm_gfx_ops);
+ else
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_hmm_gfx_ops);
+ if (r)
+ /*
+ * Make sure amdgpu_hmm_unregister() doesn't call
+ * mmu_interval_notifier_remove() when the notifier isn't properly
+ * initialized.
+ */
+ bo->notifier.mm = NULL;
+
+ return r;
}
/**
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass hdcp initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->hdcp_context.context.bin_desc.size_bytes ||
!psp->hdcp_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->hdcp_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass dtm initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->dtm_context.context.bin_desc.size_bytes ||
!psp->dtm_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->dtm_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass securedisplay initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
gtt->ttm.dma_address, flags);
}
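+ /* Record that these pages are now bound in the GART. */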
+ gtt->bound = true;
}
/*
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
{
char ucode_prefix[30];
char fw_name[40];
- int r;
+ int r, i;
- amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
- if (r)
- amdgpu_ucode_release(&adev->vcn.fw);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
+ i == 1) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
+ }
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+ if (r) {
+ amdgpu_ucode_release(&adev->vcn.fw[i]);
+ return r;
+ }
+ }
return r;
}
}
}
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+ amdgpu_ucode_release(&adev->vcn.fw[j]);
}
- amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
const struct common_firmware_header *hdr;
unsigned int offset;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.fw[i]->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
/* currently only support 2 FW instances */
if (i >= 2) {
dev_info(adev->dev, "More then 2 VCN FW instances!\n");
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw;
+ adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
- const struct firmware *fw; /* VCN firmware */
+ const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
bool indirect_sram;
{
unsigned int ret;
- if (ring->adev->vpe.collaborate_mode)
- return ~0;
-
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
u32 tmp;
int i;
- WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
gfx_v10_3_program_pbb_mode(adev);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev))
gfx_v10_3_set_power_brake_sequence(adev);
return r;
{
uint64_t value;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Program the AGP BAR */
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
- mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+ mmhub_client_ids_v3_3[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
break;
default:
mmhub_cid = NULL;
break;
}
- if (!mmhub_cid && cid == 0x140)
- mmhub_cid = "UMSCH";
-
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
mmhub_cid ? mmhub_cid : "unknown", cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
+ int i;
for_each_inst(i, inst_mask) {
sdma[i] = &adev->sdma.instance[i].ring;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
uint32_t inst_mask)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
for_each_inst(i, inst_mask) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
RB_ENABLE, 0);
r = amdgpu_ring_test_helper(page);
if (r)
return r;
-
- if (adev->mman.buffer_funcs_ring == page)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return r;
*/
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
*/
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
if (amdgpu_sriov_vf(adev))
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
init_table += header->vcn_table_offset;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
*/
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size;
uint32_t offset;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
*/
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
uint32_t offset;
/* cache window 0: fw */
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
{
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
vcn_inst = GET_INST(VCN, inst_idx);
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
-#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
#define VCN_HARVEST_MMSCH 0
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
-
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
if (!adev->dm.freesync_module)
goto update;
- if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (edid) {
- edid_check_required = is_dp_capable_without_timing_msa(
- adev->dm.dc,
- amdgpu_dm_connector);
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
}
- if (edid_check_required == true && (edid->version > 1 ||
- (edid->version == 1 && edid->revision > 1))) {
+ if (edid_check_required) {
for (i = 0; i < 4; i++) {
timing = &edid->detailed_timings[i];
if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
+ /* DPPCLK */
+ dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_entries_per_clk->num_dppclk_levels);
+ num_levels = num_entries_per_clk->num_dppclk_levels;
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+ //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+ if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
num_entries_per_clk->num_dispclk_levels)
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
}
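+ /* Cap each DPPCLK DPM level at 1950 MHz as well. */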
+ for (i = 0; i < num_levels; i++)
+ if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
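+/**
+ * wait_for_odm_update_pending_complete - wait for pending ODM double buffer
+ * updates to take effect on all non-phantom OTG masters
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */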
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * optimized dispclk depends on ODM setup. Need to wait for ODM
+ * update pending complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
if (stream->link->replay_settings.config.replay_supported)
return true;
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
return false;
}
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
- uint32_t idle_state = 0;
-
if (dc->debug.disable_idle_power_optimizations)
return false;
if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return false;
- if (dc->hwss.get_idle_state)
- idle_state = dc->hwss.get_idle_state(dc);
-
- if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
- !(idle_state & DMUB_IPS2_ALLOW_MASK))
- return true;
+ if (!dc->ctx->dmub_srv)
+ return false;
- return false;
+ return dc->ctx->dmub_srv->idle_allowed;
}
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_state_release(struct dc_state *state)
{
- kref_put(&state->refcount, dc_state_free);
+ if (state != NULL)
+ kref_put(&state->refcount, dc_state_free);
}
/*
* dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
/* SMU optimization is enabled */
bool replay_smu_opt_enable;
/* Current Coasting vtotal */
- uint16_t coasting_vtotal;
+ uint32_t coasting_vtotal;
/* Coasting vtotal table */
- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
enum replay_link_off_frame_count_level link_off_frame_count_level;
/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
+ .dpg_is_pending = NULL,
.opp_destroy = opp1_destroy
};
(double_buffer_pending == 0);
}
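+
+/* Return true while the DPG is enabled and its double-buffered update is still pending. */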
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t double_buffer_pending;
+ uint32_t dpg_en;
+
+ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+ return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
void opp2_program_left_edge_extra_pixel (
struct output_pixel_processor *opp,
bool count)
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color);
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
pipe_cnt++;
}
}
+
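+/* SubVP requires a minimum DCFCLK; raise the computed request to MIN_SUBVP_DCFCLK_KHZ when needed. */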
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+ if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
{
+ dml_uint_t width, height;
+
+ if (in->timing.h_addressable > 3840)
+ width = 3840;
+ else
+ width = in->timing.h_addressable; // 4K max
+
+ if (in->timing.v_addressable > 2160)
+ height = 2160;
+ else
+ height = in->timing.v_addressable; // 4K max
+
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
out->GPUVMMinPageSizeKBytes[location] = 256;
- out->ViewportWidth[location] = in->timing.h_addressable;
- out->ViewportHeight[location] = in->timing.v_addressable;
+ out->ViewportWidth[location] = width;
+ out->ViewportHeight[location] = height;
out->ViewportStationary[location] = false;
out->ViewportWidthChroma[location] = 0;
out->ViewportHeightChroma[location] = 0;
out->HTapsChroma[location] = 0;
out->VTapsChroma[location] = 0;
out->SourceScan[location] = dml_rotation_0;
- out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+ out->ScalerRecoutWidth[location] = width;
out->LBBitPerPixel[location] = 57;
return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // Allocate Mode Lib Ctx
- *dml2 = dml2_allocate_memory();
-
- if (!(*dml2))
- return false;
// Store config options
(*dml2)->config = *config;
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+ // Allocate Mode Lib Ctx
+ *dml2 = dml2_allocate_memory();
+
+ if (!(*dml2))
+ return false;
+
+ dml2_init(in_dc, config, dml2);
- /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
- //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
return true;
}
return true;
}
+
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2)
+{
+ dml2_init(in_dc, config, dml2);
+}
struct dml2_context *src_dml2);
bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2);
/*
* dml2_validate - Determines if a display configuration is supported or not.
return;
}
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
/* Detect top pipe only changes */
if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
- /* Detect odm changes */
- if (resource_is_odm_topology_changed(new_pipe, old_pipe))
- new_pipe->update_flags.bits.odm = 1;
-
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
+ struct pipe_ctx *pipe;
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
-
- /* when dynamic ODM is active, pipes must be reconfigured when all planes are
- * disabled, as some transitions will leave software and hardware state
- * mismatched.
- */
- if (dc->debug.enable_single_display_2to1_odm_policy &&
- pipe->stream &&
- pipe->update_flags.bits.disable &&
- !pipe->prev_odm_pipe &&
- hws->funcs.update_odm)
- hws->funcs.update_odm(dc, context, pipe);
}
}
int counter;
for (counter = 0; counter < 1000; counter++) {
- if (opp->funcs->dpg_is_blanked(opp))
+ if (!opp->funcs->dpg_is_pending(opp))
break;
udelay(100);
return false;
}
- return true;
+ return opp->funcs->dpg_is_blanked(opp);
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
if (pipe_ctx == NULL)
return;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
pipe_ctx->stream_res.stream_enc,
enable);
+
+ /* Wait for two frames to make sure AV mute is sent out */
+ if (enable) {
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+ }
}
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
dsc->funcs->dsc_disconnect(dsc);
}
}
+
+ if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+ /*
+ * blank pattern is generated by OPP, reprogram blank pattern
+ * due to OPP count change
+ */
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
+
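+/* Lock or unlock every enabled, non-phantom OTG master pipe in the given context. */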
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ else
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+}
void dcn32_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock);
#endif /* __DC_HWSS_DCN32_H__ */
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn10_lock_all_pipes,
+ .interdependent_update_lock = dcn32_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
+ bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint16_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const bool is_alpm);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const bool is_alpm);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
}
}
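+/* Poll until the double-buffered OTG H timing divider mode update has taken effect. */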
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc32_set_odm_combine,
.get_odm_combine_segments = optc32_get_odm_combine_segments,
+ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
#endif /* __DC_OPTC_DCN32_H__ */
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
dcn32_override_min_req_memclk(dc, context);
+ dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();
{
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
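+ /* DML2 keeps its own copy of the SoC bounding box, so re-initialize it after the update. */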
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
{
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * 16-bit value dictated by driver that indicates the coasting vtotal high byte part.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal)
+ uint32_t vtotal)
{
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal);
+ uint32_t vtotal);
void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_0_powerplay_table *powerplay_table =
- (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
*current_power_limit = power_limit;
if (default_power_limit)
*default_power_limit = power_limit;
-
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-
- dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
- od_percent_upper, od_percent_lower, power_limit);
-
- if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
- *max_power_limit /= 100;
- }
-
- if (min_power_limit) {
- *min_power_limit = power_limit * (100 - od_percent_lower);
- *min_power_limit /= 100;
- }
+ if (max_power_limit)
+ *max_power_limit = power_limit;
+ if (min_power_limit)
+ *min_power_limit = power_limit;
return 0;
}
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
return throttler_status;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint16_t *table_member;
GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
return dpm_desc->SnapToDiscrete == 0;
}
-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
- enum SMU_11_0_7_ODFEATURE_CAP cap)
-{
- return od_table->cap[cap];
-}
-
static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
enum SMU_11_0_7_ODSETTING_ID setting,
uint32_t *min, uint32_t *max)
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid;
- int ret = -1;
- int num = 0;
struct lt8912 *lt = connector_to_lt8912(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ int ret, num;
drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
drm_edid_connector_update(connector, drm_edid);
- if (drm_edid) {
- num = drm_edid_connector_add_modes(connector);
- } else {
- return ret;
- }
+ if (!drm_edid)
+ return 0;
+
+ num = drm_edid_connector_add_modes(connector);
ret = drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
- if (ret)
- num = ret;
+ if (ret < 0)
+ num = 0;
drm_edid_free(drm_edid);
return num;
static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
- unsigned int count;
const struct drm_edid *drm_edid;
+ int count;
drm_edid = drm_bridge_edid_read(<9611uxc->bridge, connector);
drm_edid_connector_update(connector, drm_edid);
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
*
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
*/
int drm_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
if (!panel)
- return -EINVAL;
+ return 0;
- if (panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel, connector);
+ if (panel->funcs && panel->funcs->get_modes) {
+ int num;
- return -EOPNOTSUPP;
+ num = panel->funcs->get_modes(panel, connector);
+ if (num > 0)
+ return num;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_panel_get_modes);
count = connector_funcs->get_modes(connector);
+ /* The .get_modes() callback should not return negative values. */
+ if (count < 0) {
+ drm_err(connector->dev, ".get_modes() returned %pe\n",
+ ERR_PTR(count));
+ count = 0;
+ }
+
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
{
struct exynos_dp_device *dp = to_dp(plat_data);
struct drm_display_mode *mode;
- int num_modes = 0;
if (dp->plat_data.panel)
- return num_modes;
+ return 0;
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_DEV_ERROR(dp->dev,
"failed to create a new display mode.\n");
- return num_modes;
+ return 0;
}
drm_display_mode_from_videomode(&dp->vm, mode);
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
- return num_modes + 1;
+ return 1;
}
static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
*/
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
- return -EFAULT;
+ return 0;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
- return -ENOMEM;
+ return 0;
}
drm_connector_update_edid_property(connector, edid);
int ret;
if (!hdata->ddc_adpt)
- return -ENODEV;
+ return 0;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return -ENODEV;
+ return 0;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
int ret;
if (!mode)
- return -EINVAL;
+ return 0;
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret) {
drm_mode_destroy(connector->dev, mode);
- return ret;
+ return 0;
}
drm_mode_copy(mode, &imxpd->mode);
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+ nvbo->bo.resource->bus.offset = 0;
+ nvbo->bo.resource->bus.addr = NULL;
goto retry;
}
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
- rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
/**
* r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
*
* The GSP sequencer is a list of I/O commands that the GSP can send to
* the driver to perform for various purposes. The most common usage is to
/**
* r535_gsp_libos_init() -- create the libos arguments structure
+ * @gsp: gsp pointer
*
* The logging buffers are byte queues that contain encoded printf-like
* messages from GSP-RM. They need to be decoded by a special application
/**
* nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
+ * @gsp: gsp pointer
+ * @sgt: S/G list to traverse
+ * @size: size of the image, in bytes
+ * @rx3: radix3 array to update
*
* The GSP uses a three-level page table, called radix3, to map the firmware.
* Each 64-bit "pointer" in the table is either the bus address of an entry in
unsigned long long clock)
{
const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */
+ unsigned long diff = div_u64(clock, 200); /* +-0.5% allowed by HDMI spec */
long rounded_rate;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
if (!edid)
- return -ENODEV;
+ return 0;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
goto err_unlock_list;
}
+ if (!args->num_batch_buffer) {
+ err = xe_vm_lock(vm, true);
+ if (err)
+ goto err_unlock_list;
+
+ if (!xe_vm_in_lr_mode(vm)) {
+ struct dma_fence *fence;
+
+ fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto err_unlock_list;
+ }
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_exec_queue_last_fence_set(q, vm, fence);
+ dma_fence_put(fence);
+ }
+
+ xe_vm_unlock(vm);
+ goto err_unlock_list;
+ }
+
vm_exec.vm = &vm->gpuvm;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
goto err_exec;
}
- if (!args->num_batch_buffer) {
- if (!xe_vm_in_lr_mode(vm)) {
- struct dma_fence *fence;
-
- fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_exec;
- }
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
- xe_exec_queue_last_fence_set(q, vm, fence);
- dma_fence_put(fence);
- }
-
- goto err_exec;
- }
-
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
skip_retry = true;
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->usm.tile_invalidated);
+ !(BIT(tile->id) & vma->tile_invalidated);
}
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
if (xe_vma_is_userptr(vma))
ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
- vma->usm.tile_invalidated &= ~BIT(tile->id);
+ vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
TP_ARGS(vma)
);
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
TP_PROTO(struct xe_vma *vma),
TP_ARGS(vma)
);
int err = 0;
LIST_HEAD(tmp_evict);
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
lockdep_assert_held_write(&vm->lock);
/* Collect invalidated userptrs */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
- if (err < 0)
- return err;
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ xe_vm_unlock(vm);
+ if (err)
+ return err;
+ } else {
+ if (err < 0)
+ return err;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
}
return 0;
return err;
}
- if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+ if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
true, first_op, last_op);
} else {
u8 id;
int ret;
- xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
xe_assert(xe, !xe_vma_is_null(vma));
- trace_xe_vma_usm_invalidate(vma);
+ trace_xe_vma_invalidate(vma);
/* Check that we don't race with page-table updates */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
}
}
- vma->usm.tile_invalidated = vma->tile_mask;
+ vma->tile_invalidated = vma->tile_mask;
return 0;
}
struct work_struct destroy_work;
};
- /** @usm: unified shared memory state */
- struct {
- /** @tile_invalidated: VMA has been invalidated */
- u8 tile_invalidated;
- } usm;
+ /** @tile_invalidated: VMA has been invalidated */
+ u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
return;
kobj = kobject_create_and_add("memory", tile->sysfs);
- if (!kobj)
+ if (!kobj) {
drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
+ return;
+ }
err = sysfs_create_group(kobj, &freq_group_attrs);
if (err) {
* The @get_modes callback is mostly intended to support non-probeable
* displays such as many fixed panels. Bridges that support reading
* EDID shall leave @get_modes unimplemented and implement the
- * &drm_bridge_funcs->get_edid callback instead.
+ * &drm_bridge_funcs->edid_read callback instead.
*
* This callback is optional. Bridges that implement it shall set the
* DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
/**
* @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
* connected to its output. Bridges that set this flag shall implement
- * the &drm_bridge_funcs->get_edid callback.
+ * the &drm_bridge_funcs->edid_read callback.
*/
DRM_BRIDGE_OP_EDID = BIT(1),
/**
}
#define DRM_FIXED_POINT 32
-#define DRM_FIXED_POINT_HALF 16
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
static inline int drm_fixp2int_round(s64 a)
{
- return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
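+ /* DRM_FIXED_ONE / 2 is 0.5 in 32.32 fixed point (1 << 31), i.e. round to nearest. */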
+ return drm_fixp2int(a + DRM_FIXED_ONE / 2);
}
static inline int drm_fixp2int_ceil(s64 a)
*
* RETURNS:
*
- * The number of modes added by calling drm_mode_probed_add().
+ * The number of modes added by calling drm_mode_probed_add(). Return 0
+ * on failures (no modes) instead of negative error codes.
*/
int (*get_modes)(struct drm_connector *connector);