Implements DRM's atomic KMS interfaces using DC. Signed-off-by: Harry Wentland <harry.wentland@xxxxxxx> Reviewed-by: Alex Deucher <alexander.deucher@xxxxxxx> --- drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile | 17 + drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c | 1468 +++++++++++ drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h | 168 ++ .../gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_helpers.c | 474 ++++ drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c | 820 +++++++ drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h | 122 + .../drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c | 480 ++++ .../drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h | 36 + .../gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_services.c | 457 ++++ .../gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c | 2577 ++++++++++++++++++++ .../gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h | 100 + drivers/gpu/drm/amd/dal/dc/dm_services.h | 17 - 12 files changed, 6719 insertions(+), 17 deletions(-) create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_helpers.c create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_services.c create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile new file mode 100644 index 000000000000..0f365c65342e --- /dev/null +++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile @@ -0,0 +1,17 @@ +# +# Makefile for the 'dm' sub-component of DAL. +# It provides the control and status of dm blocks. + + + +AMDGPUDM = amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o + +ifneq ($(CONFIG_DRM_AMD_DAL),) +AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o +endif + +subdir-ccflags-y += -I$(FULL_AMD_DAL_PATH)/dc + +AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) + +AMD_DAL_FILES += $(AMDGPU_DM) diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c new file mode 100644 index 000000000000..0ceb505355e8 --- /dev/null +++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c @@ -0,0 +1,1468 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_types.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+
+/* TODO: Remove when mc access work around is removed */
+static const u32 crtc_offsets[] =
+{
+	CRTC0_REGISTER_OFFSET,
+	CRTC1_REGISTER_OFFSET,
+	CRTC2_REGISTER_OFFSET,
+	CRTC3_REGISTER_OFFSET,
+	CRTC4_REGISTER_OFFSET,
+	CRTC5_REGISTER_OFFSET,
+	CRTC6_REGISTER_OFFSET
+};
+/* TODO: End of the mc access work around; remove together with it */
+
+/* Override parameters passed to DAL for feature enablement.
+ * All features are disabled here for HDMI light up.
+ * TODO: follow up on whether this mechanism is still needed. */
+struct dal_override_parameters display_param = {
+	.bool_param_enable_mask = 0,
+	.bool_param_values = 0,
+	.int_param_values[DAL_PARAM_MAX_COFUNC_NON_DP_DISPLAYS] = DAL_PARAM_INVALID_INT,
+	.int_param_values[DAL_PARAM_DRR_SUPPORT] = DAL_PARAM_INVALID_INT,
+};
+
+/* Debug facilities */
+#define AMDGPU_DM_NOT_IMPL(fmt, ...) 
\ + DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__) + +/* + * dm_vblank_get_counter + * + * @brief + * Get counter for number of vertical blanks + * + * @param + * struct amdgpu_device *adev - [in] desired amdgpu device + * int disp_idx - [in] which CRTC to get the counter from + * + * @return + * Counter for vertical blanks + */ +static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else { + struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; + + if (NULL == acrtc->target) { + DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc); + return 0; + } + + return dc_target_get_vblank_counter(acrtc->target); + } +} + +static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + +/* TODO: #DAL3 Implement scanoutpos + dal_get_crtc_scanoutpos(adev->dm.dal, crtc, vbl, position); +*/ + return 0; +} + +static u32 dm_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return mmDC_GPIO_HPD_A; +} + + +static bool dm_is_display_hung(struct amdgpu_device *adev) +{ + /* TODO: #DAL3 need to replace + u32 crtc_hung = 0; + u32 i, j, tmp; + + crtc_hung = dal_get_connected_targets_vector(adev->dm.dal); + + for (j = 0; j < 10; j++) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (crtc_hung & (1 << i)) { + int32_t vpos1, hpos1; + int32_t vpos2, hpos2; + + tmp = dal_get_crtc_scanoutpos( + adev->dm.dal, + i, + &vpos1, + &hpos1); + udelay(10); + tmp = dal_get_crtc_scanoutpos( + adev->dm.dal, + i, + &vpos2, + &hpos2); + + if (hpos1 != hpos2 && vpos1 != vpos2) + crtc_hung &= ~(1 << i); + } + } + + if (crtc_hung == 0) + return false; + } +*/ + return true; +} + +/* TODO: Remove mc access work around*/ +static void dm_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + + u32 crtc_enabled, tmp; + int i; + + save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + + /* blank the display controllers */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { +#if 0 + u32 frame_count; + int j; + + save->crtc_enabled[i] = true; + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { + amdgpu_display_vblank_wait(adev, i); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, 
MASTER_UPDATE_LOCK, 1); + WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } +#else + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + save->crtc_enabled[i] = false; + /* ***** */ +#endif + } else { + save->crtc_enabled[i] = false; + } + } +} + + +static void dm_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + + u32 tmp, frame_count; + int i, j; + + /* update crtc base addresses */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + + if (save->crtc_enabled[i]) { + tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { + tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); + WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } + for (j = 0; j < adev->usec_timeout; j++) { + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) + break; + udelay(1); + } + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + } + } + + WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); + WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); + + /* Unlock vga access */ + WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); +} + +/* End of TODO: Remove mc access work around*/ + +static bool dm_is_idle(void *handle) +{ + /* XXX todo */ + return true; +} + +static int dm_wait_for_idle(void *handle) +{ + /* XXX todo */ + return 0; +} + +static void dm_print_status(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "DCE 10.x registers\n"); + /* XXX todo */ +} + +static int dm_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0, tmp; + + if (dm_is_display_hung(adev)) + 
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; + + if (srbm_soft_reset) { + dm_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + dm_print_status(adev); + } + return 0; +} + +static struct amdgpu_crtc *get_crtc_by_target( + struct amdgpu_device *adev, + const struct dc_target *dc_target) +{ + struct drm_device *dev = adev->ddev; + struct drm_crtc *crtc; + struct amdgpu_crtc *amdgpu_crtc; + + /* + * following if is check inherited from both functions where this one is + * used now. Need to be checked why it could happen. + */ + if (dc_target == NULL) + return adev->mode_info.crtcs[0]; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (amdgpu_crtc->target == dc_target) + return amdgpu_crtc; + } + + return NULL; +} + +static void dm_pflip_high_irq(void *interrupt_params) +{ + struct amdgpu_flip_work *works; + struct amdgpu_crtc *amdgpu_crtc; + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + unsigned long flags; + const struct dc *dc = irq_params->adev->dm.dc; + const struct dc_target *dc_target = + dc_get_target_on_irq_source(dc, irq_params->irq_src); + + amdgpu_crtc = get_crtc_by_target(adev, dc_target); + + /* IRQ could occur when in initial stage */ + if(amdgpu_crtc == NULL) + return; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return; + } + + /* page flip completed. 
clean up */
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+	if (works->event)
+		drm_send_vblank_event(
+			adev->ddev,
+			amdgpu_crtc->crtc_id,
+			works->event);
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
+	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+}
+
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+	struct common_irq_params *irq_params = interrupt_params;
+	struct amdgpu_device *adev = irq_params->adev;
+	const struct dc *dc = irq_params->adev->dm.dc;
+	const struct dc_target *dc_target =
+		dc_get_target_on_irq_source(dc, irq_params->irq_src);
+	uint8_t crtc_index = 0;
+	struct amdgpu_crtc *acrtc = get_crtc_by_target(adev, dc_target);
+
+	if (acrtc)
+		crtc_index = acrtc->crtc_id;
+
+	drm_handle_vblank(adev->ddev, crtc_index);
+}
+
+static int dm_set_clockgating_state(void *handle,
+		  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+		  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void *handle);
+
+static void hotplug_notify_work_func(struct work_struct *work)
+{
+	struct amdgpu_display_manager *dm = container_of(work,
+			struct amdgpu_display_manager, mst_hotplug_work);
+	struct drm_device *dev = dm->ddev;
+
+	drm_kms_helper_hotplug_event(dev);
+}
+
+/* Initialize display KMS.
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+	struct dal_init_data init_data;
+	struct drm_device *ddev = adev->ddev;
+	adev->dm.ddev = adev->ddev;
+	adev->dm.adev = adev;
+
+	/* Zero all the fields */
+	memset(&init_data, 0, sizeof(init_data));
+
+	/* initialize DAL's lock (for SYNC context use) */
+	spin_lock_init(&adev->dm.dal_lock);
+
+	/* initialize DAL's mutex */
+	mutex_init(&adev->dm.dal_mutex);
+
+	if (amdgpu_dm_irq_init(adev)) {
+		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+		goto error;
+	}
+
+	if (ddev->pdev) {
+		init_data.bdf_info.DEVICE_NUMBER = PCI_SLOT(ddev->pdev->devfn);
+		init_data.bdf_info.FUNCTION_NUMBER =
+			PCI_FUNC(ddev->pdev->devfn);
+		if (ddev->pdev->bus)
+			init_data.bdf_info.BUS_NUMBER = ddev->pdev->bus->number;
+	}
+
+	init_data.display_param = display_param;
+
+	init_data.asic_id.chip_family = adev->family;
+
+	init_data.asic_id.pci_revision_id = adev->rev_id;
+	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+	init_data.asic_id.vram_width = adev->mc.vram_width;
+	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
+	init_data.asic_id.atombios_base_address =
+		adev->mode_info.atom_context->bios;
+	init_data.asic_id.runtime_flags.flags.bits.SKIP_POWER_DOWN_ON_RESUME = 1;
+
+	if (adev->asic_type == CHIP_CARRIZO)
+		init_data.asic_id.runtime_flags.flags.bits.GNB_WAKEUP_SUPPORTED = 1;
+
+	init_data.driver = adev;
+
+	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+	if (!adev->dm.cgs_device) {
+		DRM_ERROR("amdgpu: failed to create cgs device.\n");
+		goto error;
+	}
+
+	init_data.cgs_device = adev->dm.cgs_device;
+
+	adev->dm.dal = NULL;
+
+	/* enable gpu scaling in DAL */
+	init_data.display_param.bool_param_enable_mask |=
+		1 << DAL_PARAM_ENABLE_GPU_SCALING;
+	init_data.display_param.bool_param_values |=
+		1 << DAL_PARAM_ENABLE_GPU_SCALING;
+
+	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+	/* Display Core create. 
*/ + adev->dm.dc = dc_create(&init_data); + + INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func); + + if (amdgpu_dm_initialize_drm_device(adev)) { + DRM_ERROR( + "amdgpu: failed to initialize sw for display support.\n"); + goto error; + } + + /* Update the actual used number of crtc */ + adev->mode_info.num_crtc = adev->dm.display_indexes_num; + + /* TODO: Add_display_info? */ + + /* TODO use dynamic cursor width */ + adev->ddev->mode_config.cursor_width = 128; + adev->ddev->mode_config.cursor_height = 128; + + if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) { + DRM_ERROR( + "amdgpu: failed to initialize sw for display support.\n"); + goto error; + } + + DRM_INFO("KMS initialized.\n"); + + return 0; +error: + amdgpu_dm_fini(adev); + + return -1; +} + +void amdgpu_dm_fini(struct amdgpu_device *adev) +{ + amdgpu_dm_destroy_drm_device(&adev->dm); + /* + * TODO: pageflip, vlank interrupt + * + * amdgpu_dm_irq_fini(adev); + */ + + if (adev->dm.cgs_device) { + amdgpu_cgs_destroy_device(adev->dm.cgs_device); + adev->dm.cgs_device = NULL; + } + + /* DC Destroy TODO: Replace destroy DAL */ + { + dc_destroy(&adev->dm.dc); + } + return; +} + +/* moved from amdgpu_dm_kms.c */ +void amdgpu_dm_destroy() +{ +} + +static int dm_sw_init(void *handle) +{ + return 0; +} + +static int dm_sw_fini(void *handle) +{ + return 0; +} + + +static void detect_link_for_all_connectors(struct drm_device *dev) +{ + struct amdgpu_connector *aconnector; + struct drm_connector *connector; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->dc_link->type == dc_connection_mst_branch) { + DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", + aconnector, aconnector->base.base.id); + + if (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) < 0) { + DRM_ERROR("DM_MST: Failed to start MST\n"); + ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; + } + } + } + + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + + +static int dm_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* Create DAL display manager */ + amdgpu_dm_init(adev); + + amdgpu_dm_hpd_init(adev); + + detect_link_for_all_connectors(adev->ddev); + + + + return 0; +} + +static int dm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_dm_hpd_fini(adev); + + amdgpu_dm_irq_fini(adev); + + return 0; +} + +static int dm_display_suspend(struct drm_device *ddev) +{ + struct drm_mode_config *config = &ddev->mode_config; + struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; + struct drm_atomic_state *state; + struct drm_crtc *crtc; + unsigned crtc_mask = 0; + int ret = 0; + + if (WARN_ON(!ctx)) + return 0; + + lockdep_assert_held(&ctx->ww_ctx); + + state = drm_atomic_state_alloc(ddev); + if (WARN_ON(!state)) + return -ENOMEM; + + state->acquire_ctx = ctx; + state->allow_modeset = true; + + /* Set all active crtcs to inactive, to turn off displays*/ + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { + struct drm_crtc_state *crtc_state = + drm_atomic_get_crtc_state(state, crtc); + + ret = PTR_ERR_OR_ZERO(crtc_state); + if (ret) + goto free; + + if (!crtc_state->active) + continue; + + crtc_state->active = false; + crtc_mask |= (1 << drm_crtc_index(crtc)); + } + + if (crtc_mask) { + ret = drm_atomic_commit(state); + + /* In case of failure, revert 
everything we did.
+		 * On success, mark the suspended crtcs as active again in the
+		 * new state so a subsequent dm_display_resume() restores them. */
+		if (!ret) {
+			list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head)
+				if (crtc_mask & (1 << drm_crtc_index(crtc)))
+					crtc->state->active = true;
+
+			return ret;
+		}
+	}
+
+free:
+	if (ret) {
+		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
+		drm_atomic_state_free(state);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = handle;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_device *ddev = adev->ddev;
+	int ret = 0;
+
+	drm_modeset_lock_all(ddev);
+	ret = dm_display_suspend(ddev);
+	drm_modeset_unlock_all(ddev);
+
+	if (ret)
+		goto fail;
+
+	dc_set_power_state(
+		dm->dc,
+		DC_ACPI_CM_POWER_STATE_D3,
+		DC_VIDEO_POWER_SUSPEND);
+
+	amdgpu_dm_irq_suspend(adev);
+fail:
+	return ret;
+}
+
+static int dm_display_resume(struct drm_device *ddev)
+{
+	int ret = 0;
+	struct drm_connector *connector;
+
+	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+	/* Construct an atomic state to restore previous display setting */
+	/* Attach crtcs to drm_atomic_state */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto err;
+
+		/* force a restore */
+		crtc_state->mode_changed = true;
+	}
+
+	/* Attach planes to drm_atomic_state */
+	list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, plane));
+		if (ret)
+			goto err;
+	}
+
+	/* Attach connectors to drm_atomic_state */
+	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, connector));
+		if (ret)
+			goto err;
+	}
+
+	/* Call commit internally with the state we just constructed */
+	ret = drm_atomic_commit(state);
+	if (!ret)
+		return 0;
+
+err:
+	DRM_ERROR("Restoring old state failed with %i\n", ret);
+	drm_atomic_state_free(state);
+
+	return ret;
+}
+
+static int dm_resume(void *handle)
+{
+	struct amdgpu_device *adev = handle;
+	struct drm_device *ddev = adev->ddev;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct amdgpu_connector *aconnector;
+	struct drm_connector *connector;
+	int ret = 0;
+
+	/* power on hardware */
+	dc_set_power_state(
+		dm->dc,
+		DC_ACPI_CM_POWER_STATE_D0,
+		DC_VIDEO_POWER_ON);
+
+	/* Do detection */
+	list_for_each_entry(connector,
+			&ddev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_connector(connector);
+		dc_link_detect(aconnector->dc_link, false);
+		aconnector->dc_sink = NULL;
+		amdgpu_dm_update_connector_after_detect(aconnector);
+	}
+
+	drm_modeset_lock_all(ddev);
+	ret = dm_display_resume(ddev);
+	drm_modeset_unlock_all(ddev);
+
+	drm_kms_helper_hotplug_event(ddev);
+
+	/* program HPD filter */
+	dc_resume(dm->dc);
+	/* resume IRQ */
+	amdgpu_dm_irq_resume(adev);
+
+	return ret;
+}
+
+const struct amd_ip_funcs amdgpu_dm_funcs = {
+	.early_init = dm_early_init,
+	.late_init = NULL,
+	.sw_init = dm_sw_init,
+	.sw_fini = dm_sw_fini,
+	.hw_init = dm_hw_init,
+	.hw_fini = dm_hw_fini,
+	.suspend = dm_suspend,
+	.resume = dm_resume,
+	.is_idle = dm_is_idle,
+	.wait_for_idle = dm_wait_for_idle,
+	.soft_reset = dm_soft_reset,
+	.print_status = dm_print_status,
+	.set_clockgating_state = dm_set_clockgating_state,
+	.set_powergating_state = 
dm_set_powergating_state,
+};
+
+/* TODO: this is temporarily non-const; it should be fixed later */
+static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+	.atomic_check = amdgpu_dm_atomic_check,
+	.atomic_commit = amdgpu_dm_atomic_commit
+};
+
+void amdgpu_dm_update_connector_after_detect(
+	struct amdgpu_connector *aconnector)
+{
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	const struct dc_sink *sink;
+
+	/* MST handled by drm_mst framework */
+	if (aconnector->mst_mgr.mst_state)
+		return;
+
+	sink = aconnector->dc_link->local_sink;
+
+	/*
+	 * TODO: temporary guard, look for a proper fix:
+	 * if this sink is an MST sink, we should not do anything
+	 */
+	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		return;
+
+	if (aconnector->dc_sink == sink) {
+		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
+		 * Do nothing!! */
+		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+				aconnector->connector_id);
+		return;
+	}
+
+	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+		aconnector->connector_id, aconnector->dc_sink, sink);
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	/* 1. Update status of the drm connector
+	 * 2. Send an event and let userspace tell us what to do */
+	if (sink) {
+		/* TODO: check if we still need the S3 mode update workaround.
+		 * If yes, put it here. */
+
+		aconnector->dc_sink = sink;
+		if (sink->dc_edid.length == 0)
+			aconnector->edid = NULL;
+		else {
+			aconnector->edid =
+				(struct edid *) sink->dc_edid.raw_edid;
+			drm_mode_connector_update_edid_property(connector,
+					aconnector->edid);
+		}
+	} else {
+		drm_mode_connector_update_edid_property(connector, NULL);
+		aconnector->num_modes = 0;
+		aconnector->dc_sink = NULL;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void handle_hpd_irq(void *param)
+{
+	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+
+	/* On failure, or for MST, there is no need to update the connector
+	 * status or notify the OS, since (in the MST case) MST does this in
+	 * its own context.
+	 */
+	if (dc_link_detect(aconnector->dc_link, false)) {
+		amdgpu_dm_update_connector_after_detect(aconnector);
+		drm_kms_helper_hotplug_event(dev);
+	}
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+
+	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
+			!is_mst_root_connector) {
+		/* Downstream Port status changed. 
*/ + if (dc_link_detect(aconnector->dc_link, false)) { + amdgpu_dm_update_connector_after_detect(aconnector); + drm_kms_helper_hotplug_event(dev); + } + } + + if (is_mst_root_connector) + dm_helpers_dp_mst_handle_mst_hpd_rx_irq(param); +} + +static void register_hpd_handlers(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + struct amdgpu_connector *aconnector; + const struct dc_link *dc_link; + struct dc_interrupt_params int_params = {0}; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + list_for_each_entry(connector, + &dev->mode_config.connector_list, head) { + + aconnector = to_amdgpu_connector(connector); + dc_link = aconnector->dc_link; + + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; + int_params.irq_source = dc_link->irq_source_hpd; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_irq, + (void *) aconnector); + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + + /* Also register for DP short pulse (hpd_rx). */ + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; + int_params.irq_source = dc_link->irq_source_hpd_rx; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_rx_irq, + (void *) aconnector); + } + } +} + +/* Register IRQ sources and initialize IRQ callbacks */ +static int dce110_register_irq_handlers(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct common_irq_params *c_irq_params; + struct dc_interrupt_params int_params = {0}; + int r; + int i; + struct dc_caps caps = { 0 }; + + dc_get_caps(dc, &caps); + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + /* Actions of amdgpu_irq_add_id(): + * 1. Register a set() function with base driver. + * Base driver will call set() function to enable/disable an + * interrupt in DC hardware. + * 2. Register amdgpu_dm_irq_handler(). + * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts + * coming from DC hardware. + * amdgpu_dm_irq_handler() will re-direct the interrupt to DC + * for acknowledging and handling. 
*/ + + for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; + i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { + r = amdgpu_irq_add_id(adev, i, &adev->crtc_irq); + if (r) { + DRM_ERROR("Failed to add crtc irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params); + } + + for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; + i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { + r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); + if (r) { + DRM_ERROR("Failed to add page flip irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params); + + } + + /* HPD */ + r = amdgpu_irq_add_id(adev, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, + &adev->hpd_irq); + if (r) { + DRM_ERROR("Failed to add hpd irq id!\n"); + return r; + } + + register_hpd_handlers(adev); + + return 0; +} + +static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) +{ + int r; + + adev->mode_info.mode_config_initialized = true; + + amdgpu_dm_mode_funcs.fb_create = + amdgpu_mode_funcs.fb_create; + amdgpu_dm_mode_funcs.output_poll_changed = + amdgpu_mode_funcs.output_poll_changed; + + adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + return 0; +} + +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ + defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) + +static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) +{ + struct amdgpu_display_manager *dm = bl_get_data(bd); + + if (dc_link_set_backlight_level(dm->backlight_link, + bd->props.brightness)) + return 0; + else + return 1; +} + +static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) +{ + return bd->props.brightness; +} + +static const struct backlight_ops amdgpu_dm_backlight_ops = { + .get_brightness = amdgpu_dm_backlight_get_brightness, + .update_status = amdgpu_dm_backlight_update_status, +}; + +void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) +{ + char bl_name[16]; + struct backlight_properties props = { 0 }; + + props.max_brightness = AMDGPU_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + + snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", + dm->adev->ddev->primary->index); + + dm->backlight_dev = backlight_device_register(bl_name, + dm->adev->ddev->dev, + dm, + &amdgpu_dm_backlight_ops, + &props); + + if (NULL == dm->backlight_dev) + DRM_ERROR("DM: Backlight registration failed!\n"); + else + DRM_INFO("DM: Registered Backlight device: %s\n", bl_name); +} + +#endif + +/* In this architecture, the association + * connector -> encoder -> crtc + * id not really requried. 
The crtc and connector will hold the + * display_index as an abstraction to use with DAL component + * + * Returns 0 on success + */ +int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) +{ + struct amdgpu_display_manager *dm = &adev->dm; + uint32_t i; + struct amdgpu_connector *aconnector; + struct amdgpu_encoder *aencoder; + struct amdgpu_crtc *acrtc; + struct dc_caps caps = { 0 }; + uint32_t link_cnt; + + dc_get_caps(dm->dc, &caps); + link_cnt = caps.max_links; + + if (amdgpu_dm_mode_config_init(dm->adev)) { + DRM_ERROR("DM: Failed to initialize mode config\n"); + return -1; + } + + for (i = 0; i < caps.max_targets; i++) { + acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); + if (!acrtc) + goto fail; + + if (amdgpu_dm_crtc_init( + dm, + acrtc, + i)) { + DRM_ERROR("KMS: Failed to initialize crtc\n"); + kfree(acrtc); + goto fail; + } + } + + dm->display_indexes_num = caps.max_targets; + + /* loops over all connectors on the board */ + for (i = 0; i < link_cnt; i++) { + + if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { + DRM_ERROR( + "KMS: Cannot support more than %d display indexes\n", + AMDGPU_DM_MAX_DISPLAY_INDEX); + continue; + } + + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); + if (!aconnector) + goto fail; + + aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); + if (!aencoder) { + goto fail_free_connector; + } + + if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { + DRM_ERROR("KMS: Failed to initialize encoder\n"); + goto fail_free_encoder; + } + + if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { + DRM_ERROR("KMS: Failed to initialize connector\n"); + goto fail_free_connector; + } + + if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true)) + amdgpu_dm_update_connector_after_detect( + aconnector); + } + + /* Software is initialized. Now we can register interrupt handlers. */ + switch (adev->asic_type) { + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_CARRIZO: + if (dce110_register_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + return -1; + } + break; + default: + DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + return -1; + } + + drm_mode_config_reset(dm->ddev); + + return 0; +fail_free_encoder: + kfree(aencoder); +fail_free_connector: + kfree(aconnector); +fail: + return -1; +} + +void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) +{ + drm_mode_config_cleanup(dm->ddev); + return; +} + +/****************************************************************************** + * amdgpu_display_funcs functions + *****************************************************************************/ + + +static void dm_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); + else + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); + else + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); +} + +/** + * dm_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line buffer allocation. 
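+ *
+ * Note: currently a stub (it only logs via AMDGPU_DM_NOT_IMPL); presumably
+ * watermark and bandwidth programming is meant to happen inside DC rather
+ * than through this amdgpu_display_funcs hook.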
+ */ +static void dm_bandwidth_update(struct amdgpu_device *adev) +{ + AMDGPU_DM_NOT_IMPL("%s\n", __func__); +} + +static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, + u8 level) +{ + /* TODO: translate amdgpu_encoder to display_index and call DAL */ + AMDGPU_DM_NOT_IMPL("%s\n", __func__); +} + +static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder) +{ + /* TODO: translate amdgpu_encoder to display_index and call DAL */ + AMDGPU_DM_NOT_IMPL("%s\n", __func__); + return 0; +} + +/****************************************************************************** + * Page Flip functions + ******************************************************************************/ + +void amdgpu_dm_flip_cleanup( + struct amdgpu_device *adev, + struct amdgpu_crtc *acrtc) +{ + int r; + struct amdgpu_flip_work *works = acrtc->pflip_works; + + acrtc->pflip_works = NULL; + acrtc->pflip_status = AMDGPU_FLIP_NONE; + + if (works) { + if(works->event) + drm_send_vblank_event( + adev->ddev, + acrtc->crtc_id, + works->event); + + r = amdgpu_bo_reserve(works->old_rbo, false); + if (likely(r == 0)) { + r = amdgpu_bo_unpin(works->old_rbo); + if (unlikely(r != 0)) { + DRM_ERROR("failed to unpin buffer after flip\n"); + } + amdgpu_bo_unreserve(works->old_rbo); + } else + DRM_ERROR("failed to reserve buffer after flip\n"); + + amdgpu_bo_unref(&works->old_rbo); + kfree(works->shared); + kfree(works); + } +} + +/** + * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered + * via DRM IOCTL, by user mode. + * + * @adev: amdgpu_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (surface address update). + */ +static void dm_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base) +{ + struct amdgpu_crtc *acrtc; + struct dc_target *target; + struct dc_flip_addrs addr = { {0} }; + + /* + * TODO risk of concurrency issues + * + * This should guarded by the dal_mutex but we can't do this since the + * caller uses a spin_lock on event_lock. + * + * If we wait on the dal_mutex a second page flip interrupt might come, + * spin on the event_lock, disabling interrupts while it does so. At + * this point the core can no longer be pre-empted and return to the + * thread that waited on the dal_mutex and we're deadlocked. + * + * With multiple cores the same essentially happens but might just take + * a little longer to lock up all cores. + * + * The reason we should lock on dal_mutex is so that we can be sure + * nobody messes with acrtc->target after we read and check its value. + * + * We might be able to fix our concurrency issues with a work queue + * where we schedule all work items (mode_set, page_flip, etc.) and + * execute them one by one. Care needs to be taken to still deal with + * any potential concurrency issues arising from interrupt calls. + */ + + acrtc = adev->mode_info.crtcs[crtc_id]; + target = acrtc->target; + + /* + * Received a page flip call after the display has been reset. + * Just return in this case. Everything should be clean-up on reset. 
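+	 *
+	 * Note that the flip below only updates the surface addresses of the
+	 * target via dc_flip_surface_addrs(); no other target state is
+	 * touched on the page-flip path.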
+ */ + if (!target) + return; + + addr.address.grph.addr.low_part = lower_32_bits(crtc_base); + addr.address.grph.addr.high_part = upper_32_bits(crtc_base); + + dc_flip_surface_addrs( + adev->dm.dc, + dc_target_get_status(target)->surfaces, + &addr, 1); +} + +static const struct amdgpu_display_funcs display_funcs = { + .set_vga_render_state = dm_set_vga_render_state, + .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ + .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ + .vblank_wait = NULL, /* not called anywhere */ + .is_display_hung = dm_is_display_hung,/* called unconditionally */ + .backlight_set_level = + dm_set_backlight_level,/* called unconditionally */ + .backlight_get_level = + dm_get_backlight_level,/* called unconditionally */ + .hpd_sense = NULL,/* called unconditionally */ + .hpd_set_polarity = NULL, /* called unconditionally */ + .hpd_get_gpio_reg = dm_hpd_get_gpio_reg,/* called unconditionally */ + .page_flip = dm_page_flip, /* called unconditionally */ + .page_flip_get_scanoutpos = + dm_crtc_get_scanoutpos,/* called unconditionally */ + .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ + .add_connector = NULL, /* VBIOS parsing. DAL does it. */ + .stop_mc_access = dm_stop_mc_access, /* called unconditionally */ + .resume_mc_access = dm_resume_mc_access, /* called unconditionally */ +}; + +static void set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &display_funcs; +} + +static int dm_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + set_display_funcs(adev); + amdgpu_dm_set_irq_funcs(adev); + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 7; + break; + case CHIP_CARRIZO: + adev->mode_info.num_crtc = 3; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 9; + break; + default: + DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + return -EINVAL; + } + + /* Note: Do NOT change adev->audio_endpt_rreg and + * adev->audio_endpt_wreg because they are initialised in + * amdgpu_device_init() */ + + + + return 0; +} + + +bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm) +{ + /* TODO */ + return true; +} + +bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm) +{ + /* TODO */ + return true; +} diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h new file mode 100644 index 000000000000..c4ae90b31523 --- /dev/null +++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h @@ -0,0 +1,168 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_H__ +#define __AMDGPU_DM_H__ + +/* +#include "linux/switch.h" +*/ + +/* + * This file contains the definition for amdgpu_display_manager + * and its API for amdgpu driver's use. + * This component provides all the display related functionality + * and this is the only component that calls DAL API. + * The API contained here intended for amdgpu driver use. + * The API that is called directly from KMS framework is located + * in amdgpu_dm_kms.h file + */ + +#define AMDGPU_DM_MAX_DISPLAY_INDEX 31 +/* +#include "include/amdgpu_dal_power_if.h" +#include "amdgpu_dm_irq.h" +*/ + +#include "irq_types.h" + +/* Forward declarations */ +struct amdgpu_device; +struct drm_device; +struct amdgpu_dm_irq_handler_data; + +struct amdgpu_dm_prev_state { + struct drm_framebuffer *fb; + int32_t x; + int32_t y; + struct drm_display_mode mode; +}; + +struct common_irq_params { + struct amdgpu_device *adev; + enum dc_irq_source irq_src; +}; + +struct irq_list_head { + struct list_head head; + /* In case this interrupt needs post-processing, 'work' will be queued*/ + struct work_struct work; +}; + +struct amdgpu_display_manager { + struct dal *dal; + struct dc *dc; + void *cgs_device; + /* lock to be used when DAL is called from SYNC IRQ context */ + spinlock_t dal_lock; + + struct amdgpu_device *adev; /*AMD base driver*/ + struct drm_device *ddev; /*DRM base driver*/ + u16 display_indexes_num; + + struct amdgpu_dm_prev_state prev_state; + + /* + * 'irq_source_handler_table' holds a list of handlers + * per (DAL) IRQ source. + * + * Each IRQ source may need to be handled at different contexts. + * By 'context' we mean, for example: + * - The ISR context, which is the direct interrupt handler. + * - The 'deferred' context - this is the post-processing of the + * interrupt, but at a lower priority. + * + * Note that handlers are called in the same order as they were + * registered (FIFO). + */ + struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; + struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; + + struct common_irq_params + pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; + + struct common_irq_params + vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1]; + + /* this spin lock synchronizes access to 'irq_handler_list_table' */ + spinlock_t irq_handler_list_table_lock; + + /* Timer-related data. */ + struct list_head timer_handler_list; + struct workqueue_struct *timer_workqueue; + + /* Use dal_mutex for any activity which is NOT syncronized by + * DRM mode setting locks. + * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without* + * DRM mode setting locks being acquired. This is where dal_mutex + * is acquired before calling into DAL. 
*/
+	struct mutex dal_mutex;
+
+	struct backlight_device *backlight_dev;
+
+	const struct dc_link *backlight_link;
+
+	struct work_struct mst_hotplug_work;
+};
+
+/* basic init/fini API */
+int amdgpu_dm_init(struct amdgpu_device *adev);
+
+void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+void amdgpu_dm_destroy(void);
+
+/* initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder and drm_mode_config
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_initialize_drm_device(
+	struct amdgpu_device *adev);
+
+/* removes and deallocates the drm structures created by the above function */
+void amdgpu_dm_destroy_drm_device(
+	struct amdgpu_display_manager *dm);
+
+/* Locking/Mutex */
+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);
+
+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);
+
+/* Register "Backlight device" accessible by user-mode. */
+void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm);
+
+void amdgpu_dm_flip_cleanup(
+	struct amdgpu_device *adev,
+	struct amdgpu_crtc *acrtc);
+
+extern const struct amd_ip_funcs amdgpu_dm_funcs;
+
+void amdgpu_dm_update_connector_after_detect(
+	struct amdgpu_connector *aconnector);
+
+#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_helpers.c
new file mode 100644
index 000000000000..39c5c98fe918
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_helpers.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include <linux/string.h> +#include <linux/acpi.h> +#include <linux/version.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/amdgpu_drm.h> +#include <drm/drm_edid.h> + +#include "dm_services.h" +#include "amdgpu.h" +#include "dc.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_irq.h" +#include "amdgpu_dm_types.h" + +/* dm_helpers_parse_edid_caps + * + * Parse edid caps + * + * @edid: [in] pointer to edid + * edid_caps: [in] pointer to edid caps + * @return + * void + * */ +enum dc_edid_status dm_helpers_parse_edid_caps( + struct dc_context *ctx, + const struct dc_edid *edid, + struct dc_edid_caps *edid_caps) +{ + struct edid *edid_buf = (struct edid *) edid->raw_edid; + struct cea_sad *sads; + int sad_count = -1; + int sadb_count = -1; + int i = 0; + int j = 0; + uint8_t *sadb = NULL; + + enum dc_edid_status result = EDID_OK; + + if (!edid_caps || !edid) + return EDID_BAD_INPUT; + + if (!drm_edid_is_valid(edid_buf)) + result = EDID_BAD_CHECKSUM; + + edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] | + ((uint16_t) edid_buf->mfg_id[1])<<8; + edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] | + ((uint16_t) edid_buf->prod_code[1])<<8; + edid_caps->serial_number = edid_buf->serial; + edid_caps->manufacture_week = edid_buf->mfg_week; + edid_caps->manufacture_year = edid_buf->mfg_year; + + /* One of the four detailed_timings stores the monitor name. It's + * stored in an array of length 13. */ + for (i = 0; i < 4; i++) { + if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) { + while (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] && j < 13) { + if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n') + break; + + edid_caps->display_name[j] = + edid_buf->detailed_timings[i].data.other_data.data.str.str[j]; + j++; + } + } + } + + sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); + if (sad_count <= 0) { + DRM_INFO("SADs count is: %d, don't need to read it\n", + sad_count); + return result; + } + + edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? 
sad_count : DC_MAX_AUDIO_DESC_COUNT; + for (i = 0; i < edid_caps->audio_mode_count; ++i) { + struct cea_sad *sad = &sads[i]; + + edid_caps->audio_modes[i].format_code = sad->format; + edid_caps->audio_modes[i].channel_count = sad->channels; + edid_caps->audio_modes[i].sample_rate = sad->freq; + edid_caps->audio_modes[i].sample_size = sad->byte2; + } + + sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb); + + if (sadb_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); + sadb_count = 0; + } + + if (sadb_count) + edid_caps->speaker_flags = sadb[0]; + else + edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; + + kfree(sads); + kfree(sadb); + + return result; +} + +static struct amdgpu_connector *get_connector_for_sink( + struct drm_device *dev, + const struct dc_sink *sink) +{ + struct drm_connector *connector; + struct amdgpu_connector *aconnector = NULL; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->dc_sink == sink) + break; + } + + return aconnector; +} + +static struct amdgpu_connector *get_connector_for_link( + struct drm_device *dev, + const struct dc_link *link) +{ + struct drm_connector *connector; + struct amdgpu_connector *aconnector = NULL; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->dc_link == link) + break; + } + + return aconnector; +} + + +static void get_payload_table( + struct amdgpu_connector *aconnector, + struct dp_mst_stream_allocation_table *proposed_table) +{ + int i; + struct drm_dp_mst_topology_mgr *mst_mgr = + &aconnector->mst_port->mst_mgr; + + mutex_lock(&mst_mgr->payload_lock); + + proposed_table->stream_count = 0; + + /* number of active streams */ + for (i = 0; i < mst_mgr->max_payloads; i++) { + if (mst_mgr->payloads[i].num_slots == 0) + break; /* end of vcp_id table */ + + ASSERT(mst_mgr->payloads[i].payload_state != + DP_PAYLOAD_DELETE_LOCAL); + + if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL || + mst_mgr->payloads[i].payload_state == + DP_PAYLOAD_REMOTE) { + + struct dp_mst_stream_allocation *sa = + &proposed_table->stream_allocations[ + proposed_table->stream_count]; + + sa->slot_count = mst_mgr->payloads[i].num_slots; + sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi; + proposed_table->stream_count++; + } + } + + mutex_unlock(&mst_mgr->payload_lock); +} + +/* + * Writes payload allocation table in immediate downstream device. 
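+ *
+ * On enable, a VCPI is allocated through the DRM MST helpers and the payload
+ * table is pushed to the branch device with drm_dp_update_payload_part1();
+ * the resulting slot assignments are then copied back into DC's
+ * dp_mst_stream_allocation_table by get_payload_table().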
+ */ +bool dm_helpers_dp_mst_write_payload_allocation_table( + struct dc_context *ctx, + const struct dc_stream *stream, + struct dp_mst_stream_allocation_table *proposed_table, + bool enable) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + int slots = 0; + bool ret; + int clock; + int bpp = 0; + int pbn = 0; + + aconnector = get_connector_for_sink(dev, stream->sink); + + if (!aconnector->mst_port) + return false; + + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!mst_mgr->mst_state) + return false; + + mst_port = aconnector->port; + + if (enable) { + clock = stream->timing.pix_clk_khz; + + switch (stream->timing.display_color_depth) { + + case COLOR_DEPTH_666: + bpp = 6; + break; + case COLOR_DEPTH_888: + bpp = 8; + break; + case COLOR_DEPTH_101010: + bpp = 10; + break; + case COLOR_DEPTH_121212: + bpp = 12; + break; + case COLOR_DEPTH_141414: + bpp = 14; + break; + case COLOR_DEPTH_161616: + bpp = 16; + break; + default: + ASSERT(bpp != 0); + break; + } + + bpp = bpp * 3; + + /* TODO need to know link rate */ + + pbn = drm_dp_calc_pbn_mode(clock, bpp); + + ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, &slots); + + if (!ret) + return false; + + } else { + drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); + } + + ret = drm_dp_update_payload_part1(mst_mgr); + + /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or + * AUX message. The sequence is slot 1-63 allocated sequence for each + * stream. AMD ASIC stream slot allocation should follow the same + * sequence. copy DRM MST allocation to dc */ + + get_payload_table(aconnector, proposed_table); + + if (ret) + return false; + + return true; +} + +/* + * Polls for ACT (allocation change trigger) handled and sends + * ALLOCATE_PAYLOAD message. + */ +bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( + struct dc_context *ctx, + const struct dc_stream *stream) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + int ret; + + aconnector = get_connector_for_sink(dev, stream->sink); + + if (!aconnector->mst_port) + return false; + + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!mst_mgr->mst_state) + return false; + + ret = drm_dp_check_act_status(mst_mgr); + + if (ret) + return false; + + return true; +} + +bool dm_helpers_dp_mst_send_payload_allocation( + struct dc_context *ctx, + const struct dc_stream *stream, + bool enable) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct drm_device *dev = adev->ddev; + struct amdgpu_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + int ret; + + aconnector = get_connector_for_sink(dev, stream->sink); + + mst_port = aconnector->port; + + if (!aconnector->mst_port) + return false; + + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!mst_mgr->mst_state) + return false; + + ret = drm_dp_update_payload_part2(mst_mgr); + + if (ret) + return false; + + if (!enable) + drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); + + return true; +} + +void dm_helpers_dp_mst_handle_mst_hpd_rx_irq(void *param) +{ + uint8_t esi[8] = { 0 }; + uint8_t dret; + bool new_irq_handled = true; + struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param; + + /* DPCD 0x2002 - 0x2008 for down stream IRQ from MST, eDP etc. 
 */
+	dret = drm_dp_dpcd_read(
+		&aconnector->dm_dp_aux.aux,
+		DP_SINK_COUNT_ESI, esi, 8);
+
+	while ((dret == 8) && new_irq_handled) {
+		uint8_t retry;
+
+		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+		/* handle HPD short pulse irq */
+		drm_dp_mst_hpd_irq(&aconnector->mst_mgr, esi, &new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify downstream */
+			for (retry = 0; retry < 3; retry++) {
+				uint8_t wret;
+
+				wret = drm_dp_dpcd_write(
+					&aconnector->dm_dp_aux.aux,
+					DP_SINK_COUNT_ESI + 1,
+					&esi[1],
+					3);
+				if (wret == 3)
+					break;
+			}
+
+			/* check if there is a new irq to be handled */
+			dret = drm_dp_dpcd_read(
+				&aconnector->dm_dp_aux.aux,
+				DP_SINK_COUNT_ESI, esi, 8);
+		}
+	}
+}
+
+bool dm_helpers_dp_mst_start_top_mgr(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		bool boot)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!\n");
+		return false;
+	}
+
+	if (boot) {
+		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
+					aconnector, aconnector->base.base.id);
+		return true;
+	}
+
+	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+			aconnector, aconnector->base.base.id);
+
+	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+}
+
+void dm_helpers_dp_mst_stop_top_mgr(
+		struct dc_context *ctx,
+		const struct dc_link *link)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector)
+		return;
+
+	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
+			aconnector, aconnector->base.base.id);
+
+	if (aconnector->mst_mgr.mst_state)
+		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
+}
+
+bool dm_helper_dp_read_dpcd(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		uint32_t address,
+		uint8_t *data,
+		uint32_t size)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!\n");
+		return false;
+	}
+
+	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
+			data, size) > 0;
+}
+
+bool dm_helper_dp_write_dpcd(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		uint32_t address,
+		const uint8_t *data,
+		uint32_t size)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!\n");
+		return false;
+	}
+
+	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
+			address, (uint8_t *)data, size) > 0;
+}
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c
new file mode 100644
index 000000000000..9b5fd7063a0b
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c
@@ -0,0 +1,820 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drmP.h> + +#include "dm_services_types.h" +#include "dc.h" + +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_irq.h" + + +/****************************************************************************** + * Private declarations. + *****************************************************************************/ + +struct handler_common_data { + struct list_head list; + interrupt_handler handler; + void *handler_arg; + + /* DM which this handler belongs to */ + struct amdgpu_display_manager *dm; +}; + +struct amdgpu_dm_irq_handler_data { + struct handler_common_data hcd; + /* DAL irq source which registered for this interrupt. */ + enum dc_irq_source irq_source; +}; + +struct amdgpu_dm_timer_handler_data { + struct handler_common_data hcd; + struct delayed_work d_work; +}; + + +#define DM_IRQ_TABLE_LOCK(adev, flags) \ + spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags) + +#define DM_IRQ_TABLE_UNLOCK(adev, flags) \ + spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags) + +/****************************************************************************** + * Private functions. + *****************************************************************************/ + +static void init_handler_common_data( + struct handler_common_data *hcd, + void (*ih)(void *), + void *args, + struct amdgpu_display_manager *dm) +{ + hcd->handler = ih; + hcd->handler_arg = args; + hcd->dm = dm; +} + +/** + * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper. + * + * @work: work struct + */ +static void dm_irq_work_func(struct work_struct *work) +{ + struct list_head *entry; + struct irq_list_head *irq_list_head = + container_of(work, struct irq_list_head, work); + struct list_head *handler_list = &irq_list_head->head; + struct amdgpu_dm_irq_handler_data *handler_data; + + list_for_each(entry, handler_list) { + handler_data = + list_entry( + entry, + struct amdgpu_dm_irq_handler_data, + hcd.list); + + DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n", + handler_data->irq_source); + + DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n", + handler_data->irq_source); + + handler_data->hcd.handler(handler_data->hcd.handler_arg); + } + + /* Call a DAL subcomponent which registered for interrupt notification + * at INTERRUPT_LOW_IRQ_CONTEXT. 
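+	 * Handlers on this list run in process context off the work queue,
+	 * so they are allowed to sleep.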
+ * (The most common use is HPD interrupt) */ +} + +/** + * Remove a handler and return a pointer to hander list from which the + * handler was removed. + */ +static struct list_head *remove_irq_handler( + struct amdgpu_device *adev, + void *ih, + const struct dc_interrupt_params *int_params) +{ + struct list_head *hnd_list; + struct list_head *entry, *tmp; + struct amdgpu_dm_irq_handler_data *handler; + unsigned long irq_table_flags; + bool handler_removed = false; + enum dc_irq_source irq_source; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + irq_source = int_params->irq_source; + + switch (int_params->int_context) { + case INTERRUPT_HIGH_IRQ_CONTEXT: + hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; + break; + case INTERRUPT_LOW_IRQ_CONTEXT: + default: + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; + break; + } + + list_for_each_safe(entry, tmp, hnd_list) { + + handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, + hcd.list); + + if (ih == handler) { + /* Found our handler. Remove it from the list. */ + list_del(&handler->hcd.list); + handler_removed = true; + break; + } + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (handler_removed == false) { + /* Not necessarily an error - caller may not + * know the context. */ + return NULL; + } + + kfree(handler); + + DRM_DEBUG_KMS( + "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n", + ih, int_params->irq_source, int_params->int_context); + + return hnd_list; +} + +/* If 'handler_in == NULL' then remove ALL handlers. */ +static void remove_timer_handler( + struct amdgpu_device *adev, + struct amdgpu_dm_timer_handler_data *handler_in) +{ + struct amdgpu_dm_timer_handler_data *handler_temp; + struct list_head *handler_list; + struct list_head *entry, *tmp; + unsigned long irq_table_flags; + bool handler_removed = false; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + handler_list = &adev->dm.timer_handler_list; + + list_for_each_safe(entry, tmp, handler_list) { + /* Note that list_for_each_safe() guarantees that + * handler_temp is NOT null. */ + handler_temp = list_entry(entry, + struct amdgpu_dm_timer_handler_data, hcd.list); + + if (handler_in == NULL || handler_in == handler_temp) { + list_del(&handler_temp->hcd.list); + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n", + handler_temp); + + if (handler_in == NULL) { + /* Since it is still in the queue, it must + * be cancelled. */ + cancel_delayed_work_sync(&handler_temp->d_work); + } + + kfree(handler_temp); + handler_removed = true; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + } + + if (handler_in == NULL) { + /* Remove ALL handlers. */ + continue; + } + + if (handler_in == handler_temp) { + /* Remove a SPECIFIC handler. + * Found our handler - we can stop here. */ + break; + } + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (handler_in != NULL && handler_removed == false) { + DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n", + handler_in); + } +} + +/** + * dm_timer_work_func - Handle a timer. + * + * @work: work struct + */ +static void dm_timer_work_func( + struct work_struct *work) +{ + struct amdgpu_dm_timer_handler_data *handler_data = + container_of(work, struct amdgpu_dm_timer_handler_data, + d_work.work); + + DRM_DEBUG_KMS("DM_IRQ: work_func: handler_data=%p\n", handler_data); + + /* Call a DAL subcomponent which registered for timer notification. 
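+	 * The handler runs in process context on the dedicated
+	 * "dm_timer_queue" workqueue, so it may sleep.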
*/ + handler_data->hcd.handler(handler_data->hcd.handler_arg); + + /* We support only "single shot" timers. That means we must delete + * the handler after it was called. */ + remove_timer_handler(handler_data->hcd.dm->adev, handler_data); +} + +static bool validate_irq_registration_params( + struct dc_interrupt_params *int_params, + void (*ih)(void *)) +{ + if (NULL == int_params || NULL == ih) { + DRM_ERROR("DM_IRQ: invalid input!\n"); + return false; + } + + if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) { + DRM_ERROR("DM_IRQ: invalid context: %d!\n", + int_params->int_context); + return false; + } + + if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) { + DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", + int_params->irq_source); + return false; + } + + return true; +} + +static bool validate_irq_unregistration_params( + enum dc_irq_source irq_source, + irq_handler_idx handler_idx) +{ + if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) { + DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); + return false; + } + + if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) { + DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source); + return false; + } + + return true; +} +/****************************************************************************** + * Public functions. + * + * Note: caller is responsible for input validation. + *****************************************************************************/ + +void *amdgpu_dm_irq_register_interrupt( + struct amdgpu_device *adev, + struct dc_interrupt_params *int_params, + void (*ih)(void *), + void *handler_args) +{ + struct list_head *hnd_list; + struct amdgpu_dm_irq_handler_data *handler_data; + unsigned long irq_table_flags; + enum dc_irq_source irq_source; + + if (false == validate_irq_registration_params(int_params, ih)) + return DAL_INVALID_IRQ_HANDLER_IDX; + + handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); + if (!handler_data) { + DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); + return DAL_INVALID_IRQ_HANDLER_IDX; + } + + memset(handler_data, 0, sizeof(*handler_data)); + + init_handler_common_data(&handler_data->hcd, ih, handler_args, + &adev->dm); + + irq_source = int_params->irq_source; + + handler_data->irq_source = irq_source; + + /* Lock the list, add the handler. */ + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + switch (int_params->int_context) { + case INTERRUPT_HIGH_IRQ_CONTEXT: + hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; + break; + case INTERRUPT_LOW_IRQ_CONTEXT: + default: + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; + break; + } + + list_add_tail(&handler_data->hcd.list, hnd_list); + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + /* This pointer will be stored by code which requested interrupt + * registration. + * The same pointer will be needed in order to unregister the + * interrupt. 
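+	 * (amdgpu_dm_irq_unregister_interrupt() searches every interrupt
+	 * context for it, since the caller may not know which context the
+	 * handler was registered in.)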
*/ + + DRM_DEBUG_KMS( + "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", + handler_data, + irq_source, + int_params->int_context); + + return handler_data; +} + +void amdgpu_dm_irq_unregister_interrupt( + struct amdgpu_device *adev, + enum dc_irq_source irq_source, + void *ih) +{ + struct list_head *handler_list; + struct dc_interrupt_params int_params; + int i; + + if (false == validate_irq_unregistration_params(irq_source, ih)) + return; + + memset(&int_params, 0, sizeof(int_params)); + + int_params.irq_source = irq_source; + + for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) { + + int_params.int_context = i; + + handler_list = remove_irq_handler(adev, ih, &int_params); + + if (handler_list != NULL) + break; + } + + if (handler_list == NULL) { + /* If we got here, it means we searched all irq contexts + * for this irq source, but the handler was not found. */ + DRM_ERROR( + "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", + ih, irq_source); + } +} + +int amdgpu_dm_irq_init( + struct amdgpu_device *adev) +{ + int src; + struct irq_list_head *lh; + + DRM_DEBUG_KMS("DM_IRQ\n"); + + spin_lock_init(&adev->dm.irq_handler_list_table_lock); + + for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { + /* low context handler list init */ + lh = &adev->dm.irq_handler_list_low_tab[src]; + INIT_LIST_HEAD(&lh->head); + INIT_WORK(&lh->work, dm_irq_work_func); + + /* high context handler init */ + INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); + } + + INIT_LIST_HEAD(&adev->dm.timer_handler_list); + + /* allocate and initialize the workqueue for DM timer */ + adev->dm.timer_workqueue = create_singlethread_workqueue( + "dm_timer_queue"); + if (adev->dm.timer_workqueue == NULL) { + DRM_ERROR("DM_IRQ: unable to create timer queue!\n"); + return -1; + } + + return 0; +} + +void amdgpu_dm_irq_register_timer( + struct amdgpu_device *adev, + struct dc_timer_interrupt_params *int_params, + interrupt_handler ih, + void *args) +{ + unsigned long jf_delay; + struct list_head *handler_list; + struct amdgpu_dm_timer_handler_data *handler_data; + unsigned long irq_table_flags; + + handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); + if (!handler_data) { + DRM_ERROR("DM_IRQ: failed to allocate timer handler!\n"); + return; + } + + memset(handler_data, 0, sizeof(*handler_data)); + + init_handler_common_data(&handler_data->hcd, ih, args, &adev->dm); + + INIT_DELAYED_WORK(&handler_data->d_work, dm_timer_work_func); + + /* Lock the list, add the handler. */ + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + handler_list = &adev->dm.timer_handler_list; + + list_add_tail(&handler_data->hcd.list, handler_list); + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + jf_delay = usecs_to_jiffies(int_params->micro_sec_interval); + + queue_delayed_work(adev->dm.timer_workqueue, &handler_data->d_work, + jf_delay); + + DRM_DEBUG_KMS("DM_IRQ: added handler:%p with micro_sec_interval=%u\n", + handler_data, int_params->micro_sec_interval); + return; +} + +/* DM IRQ and timer resource release */ +void amdgpu_dm_irq_fini( + struct amdgpu_device *adev) +{ + int src; + struct irq_list_head *lh; + DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); + + for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { + + /* The handler was removed from the table, + * it means it is safe to flush all the 'work' + * (because no code can schedule a new one). */ + lh = &adev->dm.irq_handler_list_low_tab[src]; + flush_work(&lh->work); + } + + /* Cancel ALL timers and release handlers (if any). 
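+	 * Passing NULL to remove_timer_handler() removes every registered
+	 * timer handler and cancels its pending delayed work.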
*/ + remove_timer_handler(adev, NULL); + /* Release the queue itself. */ + destroy_workqueue(adev->dm.timer_workqueue); +} + +int amdgpu_dm_irq_suspend( + struct amdgpu_device *adev) +{ + int src; + struct list_head *hnd_list_h; + struct list_head *hnd_list_l; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: suspend\n"); + + /* disable HW interrupt */ + for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) { + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; + if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) + dc_interrupt_set(adev->dm.dc, src, false); + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + flush_work(&adev->dm.irq_handler_list_low_tab[src].work); + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + return 0; +} + +int amdgpu_dm_irq_resume( + struct amdgpu_device *adev) +{ + int src; + struct list_head *hnd_list_h, *hnd_list_l; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: resume\n"); + + /* re-enable HW interrupt */ + for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) { + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; + if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) + dc_interrupt_set(adev->dm.dc, src, true); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + return 0; +} + + +/** + * amdgpu_dm_irq_schedule_work - schedule all work items registered for the + * "irq_source". + */ +static void amdgpu_dm_irq_schedule_work( + struct amdgpu_device *adev, + enum dc_irq_source irq_source) +{ + unsigned long irq_table_flags; + struct work_struct *work = NULL; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head)) + work = &adev->dm.irq_handler_list_low_tab[irq_source].work; + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (work) { + if (!schedule_work(work)) + DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n", + irq_source); + } + +} + +/** amdgpu_dm_irq_immediate_work + * Callback high irq work immediately, don't send to work queue + */ +static void amdgpu_dm_irq_immediate_work( + struct amdgpu_device *adev, + enum dc_irq_source irq_source) +{ + struct amdgpu_dm_irq_handler_data *handler_data; + struct list_head *entry; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + list_for_each( + entry, + &adev->dm.irq_handler_list_high_tab[irq_source]) { + + handler_data = + list_entry( + entry, + struct amdgpu_dm_irq_handler_data, + hcd.list); + + /* Call a subcomponent which registered for immediate + * interrupt notification */ + handler_data->hcd.handler(handler_data->hcd.handler_arg); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); +} + +/* + * amdgpu_dm_irq_handler + * + * Generic IRQ handler, calls all registered high irq work immediately, and + * schedules work for low irq + */ +int amdgpu_dm_irq_handler( + struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + + enum dc_irq_source src = + dc_interrupt_to_irq_source( + adev->dm.dc, + entry->src_id, + entry->src_data); + + dc_interrupt_ack(adev->dm.dc, src); + + /* Call high irq work immediately */ + amdgpu_dm_irq_immediate_work(adev, src); + /*Schedule low_irq work */ + amdgpu_dm_irq_schedule_work(adev, src); + + return 0; +} + +static 
enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) +{ + switch (type) { + case AMDGPU_HPD_1: + return DC_IRQ_SOURCE_HPD1; + case AMDGPU_HPD_2: + return DC_IRQ_SOURCE_HPD2; + case AMDGPU_HPD_3: + return DC_IRQ_SOURCE_HPD3; + case AMDGPU_HPD_4: + return DC_IRQ_SOURCE_HPD4; + case AMDGPU_HPD_5: + return DC_IRQ_SOURCE_HPD5; + case AMDGPU_HPD_6: + return DC_IRQ_SOURCE_HPD6; + default: + return DC_IRQ_SOURCE_INVALID; + } +} + +static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); + bool st = (state == AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, src, st); + return 0; +} + +static inline int dm_irq_state( + struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned crtc_id, + enum amdgpu_interrupt_state state, + const enum irq_type dal_irq_type, + const char *func) +{ + bool st; + enum dc_irq_source irq_source; + + struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; + + if (!acrtc->target) { + DRM_INFO( + "%s: target is null for crtc %d, talk to David R\n", + func, + crtc_id); + WARN_ON(true); + return 0; + } + + irq_source = dc_target_get_irq_src(acrtc->target, dal_irq_type); + + st = (state == AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, irq_source, st); + return 0; +} + +static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_PFLIP, + __func__); +} + +static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_VUPDATE, + __func__); +} + +static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = { + .set = amdgpu_dm_set_crtc_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = { + .set = amdgpu_dm_set_pflip_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = { + .set = amdgpu_dm_set_hpd_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dm_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; + + adev->hpd_irq.num_types = AMDGPU_HPD_LAST; + adev->hpd_irq.funcs = &dm_hpd_irq_funcs; +} + +/* + * amdgpu_dm_hpd_init - hpd setup callback. + * + * @adev: amdgpu_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. 
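+ *
+ * eDP and LVDS connectors are skipped: enabling HPD on them can break the
+ * AUX channel and cause interrupt storms during DPMS (see the comment in
+ * the function body).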
+ */ +void amdgpu_dm_hpd_init(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + + const struct dc_link *dc_link = amdgpu_connector->dc_link; + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking + * the aux dp channel on imac and help (but not + * completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + continue; + } + + dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, true); + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd_rx, + true); + } + } +} + +/** + * amdgpu_dm_hpd_fini - hpd tear down callback. + * + * @adev: amdgpu_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. + */ +void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + const struct dc_link *dc_link = amdgpu_connector->dc_link; + + dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false); + + if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd_rx, + false); + } + } +} diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h new file mode 100644 index 000000000000..afedb50897bd --- /dev/null +++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h @@ -0,0 +1,122 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_DM_IRQ_H__ +#define __AMDGPU_DM_IRQ_H__ + +#include "irq_types.h" /* DAL irq definitions */ + +/* + * Display Manager IRQ-related interfaces (for use by DAL). + */ + +/** + * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'. + * + * This function should be called exactly once - during DM initialization. 
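+ * It sets up the low/high context handler tables and creates the
+ * single-threaded workqueue used for timer interrupts.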
+ * + * Returns: + * 0 - success + * non-zero - error + */ +int amdgpu_dm_irq_init( + struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'. + * + * This function should be called exactly once - during DM destruction. + * + */ +void amdgpu_dm_irq_fini( + struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_register_interrupt - register irq handler for Display block. + * + * @adev: AMD DRM device + * @int_params: parameters for the irq + * @ih: pointer to the irq hander function + * @handler_args: arguments which will be passed to ih + * + * Returns: + * IRQ Handler Index on success. + * NULL on failure. + * + * Cannot be called from an interrupt handler. + */ +void *amdgpu_dm_irq_register_interrupt( + struct amdgpu_device *adev, + struct dc_interrupt_params *int_params, + void (*ih)(void *), + void *handler_args); + +/** + * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered + * by amdgpu_dm_irq_register_interrupt(). + * + * @adev: AMD DRM device. + * @ih_index: irq handler index which was returned by + * amdgpu_dm_irq_register_interrupt + */ +void amdgpu_dm_irq_unregister_interrupt( + struct amdgpu_device *adev, + enum dc_irq_source irq_source, + void *ih_index); + +void amdgpu_dm_irq_register_timer( + struct amdgpu_device *adev, + struct dc_timer_interrupt_params *int_params, + interrupt_handler ih, + void *args); + +/** + * amdgpu_dm_irq_handler + * Generic IRQ handler, calls all registered high irq work immediately, and + * schedules work for low irq + */ +int amdgpu_dm_irq_handler( + struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); + +void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev); + +void amdgpu_dm_hpd_init(struct amdgpu_device *adev); +void amdgpu_dm_hpd_fini(struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend. + * + */ +int amdgpu_dm_irq_suspend( + struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_resume - enable ASIC interrupt during resume. + * + */ +int amdgpu_dm_irq_resume( + struct amdgpu_device *adev); + +#endif /* __AMDGPU_DM_IRQ_H__ */ diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c new file mode 100644 index 000000000000..22ba13080cf1 --- /dev/null +++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c @@ -0,0 +1,480 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/version.h> +#include <drm/drm_atomic_helper.h> +#include "dm_services.h" +#include "amdgpu.h" +#include "amdgpu_dm_types.h" +#include "amdgpu_dm_mst_types.h" + +#include "dc.h" +#include "dm_helpers.h" + + +/* #define TRACE_DPCD */ + +#ifdef TRACE_DPCD +#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI) + +static inline char *side_band_msg_type_to_str(uint32_t address) +{ + static char str[10] = {0}; + + if (address < DP_SIDEBAND_MSG_UP_REP_BASE) + strcpy(str, "DOWN_REQ"); + else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE) + strcpy(str, "UP_REP"); + else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE) + strcpy(str, "DOWN_REP"); + else + strcpy(str, "UP_REQ"); + + return str; +} + +void log_dpcd(uint8_t type, + uint32_t address, + uint8_t *data, + uint32_t size, + bool res) +{ + DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n", + (type == DP_AUX_NATIVE_READ) || + (type == DP_AUX_I2C_READ) ? + "Read" : "Write", + address, + SIDE_BAND_MSG(address) ? + side_band_msg_type_to_str(address) : "Nop", + res ? "OK" : "Fail"); + + if (res) { + print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false); + } +} +#endif + +static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct pci_dev *pdev = to_pci_dev(aux->dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_dev->dev_private; + struct dc *dc = adev->dm.dc; + bool res; + + switch (msg->request) { + case DP_AUX_NATIVE_READ: + res = dc_read_dpcd( + dc, + TO_DM_AUX(aux)->link_index, + msg->address, + msg->buffer, + msg->size); + break; + case DP_AUX_NATIVE_WRITE: + res = dc_write_dpcd( + dc, + TO_DM_AUX(aux)->link_index, + msg->address, + msg->buffer, + msg->size); + break; + default: + return 0; + } + +#ifdef TRACE_DPCD + log_dpcd(msg->request, + msg->address, + msg->buffer, + msg->size, + res); +#endif + + return msg->size; +} + +static enum drm_connector_status +dm_dp_mst_detect(struct drm_connector *connector, bool force) +{ + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + struct amdgpu_connector *master = aconnector->mst_port; + + enum drm_connector_status status = + drm_dp_mst_detect_port( + connector, + &master->mst_mgr, + aconnector->port); + + if (status == connector_status_disconnected && aconnector->edid) { + kfree(aconnector->edid); + aconnector->edid = NULL; + } + + /* + * we do not want to make this connector connected until we have edid on + * it + */ + if (status == connector_status_connected && + !aconnector->port->cached_edid) + status = connector_status_disconnected; + + return status; +} + +static void +dm_dp_mst_connector_destroy(struct drm_connector *connector) +{ + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + struct amdgpu_encoder *amdgpu_encoder = amdgpu_connector->mst_encoder; + + drm_encoder_cleanup(&amdgpu_encoder->base); + kfree(amdgpu_encoder); + drm_connector_cleanup(connector); + kfree(amdgpu_connector); +} + +static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = dm_dp_mst_detect, + .fill_modes = 
drm_helper_probe_single_connector_modes, + .destroy = dm_dp_mst_connector_destroy, + .reset = amdgpu_dm_connector_funcs_reset, + .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, + .atomic_destroy_state = amdgpu_dm_connector_atomic_destroy_state, + .atomic_set_property = amdgpu_dm_connector_atomic_set_property +}; + +static struct dc_sink *dm_dp_mst_add_mst_sink( + const struct dc_link *dc_link, + uint8_t *edid, + uint16_t len) +{ + struct dc_sink *dc_sink; + struct dc_sink_init_data init_params = { + .link = dc_link, + .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST}; + enum dc_edid_status edid_status; + + if (len > MAX_EDID_BUFFER_SIZE) { + DRM_ERROR("Max EDID buffer size breached!\n"); + return NULL; + } + + if (!dc_link) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + /* + * TODO make dynamic-ish? + * dc_link->connector_signal; + */ + + dc_sink = dc_sink_create(&init_params); + + if (!dc_sink) + return NULL; + + dm_memmove(dc_sink->dc_edid.raw_edid, edid, len); + dc_sink->dc_edid.length = len; + + if (!dc_link_add_remote_sink( + dc_link, + dc_sink)) + goto fail_add_sink; + + edid_status = dm_helpers_parse_edid_caps( + NULL, + &dc_sink->dc_edid, + &dc_sink->edid_caps); + if (edid_status != EDID_OK) + goto fail; + + /* dc_sink_retain(&core_sink->public); */ + + return dc_sink; +fail: + dc_link_remove_remote_sink(dc_link, dc_sink); +fail_add_sink: + return NULL; +} + +static int dm_dp_mst_get_modes(struct drm_connector *connector) +{ + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + struct amdgpu_connector *master = aconnector->mst_port; + struct edid *edid; + const struct dc_sink *sink; + int ret = 0; + + if (!aconnector->edid) { + edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, aconnector->port); + + if (!edid) { + drm_mode_connector_update_edid_property( + &aconnector->base, + NULL); + + return ret; + } + + aconnector->edid = edid; + + if (aconnector->dc_sink) + dc_link_remove_remote_sink( + aconnector->dc_link, + aconnector->dc_sink); + + sink = dm_dp_mst_add_mst_sink( + aconnector->dc_link, + (uint8_t *)edid, + (edid->extensions + 1) * EDID_LENGTH); + aconnector->dc_sink = sink; + } else + edid = aconnector->edid; + + DRM_DEBUG_KMS("edid retrieved %p\n", edid); + + drm_mode_connector_update_edid_property( + &aconnector->base, + aconnector->edid); + + ret = drm_add_edid_modes(&aconnector->base, aconnector->edid); + + drm_edid_to_eld(&aconnector->base, aconnector->edid); + + return ret; +} + +static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector) +{ + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + return &amdgpu_connector->mst_encoder->base; +} + +static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { + .get_modes = dm_dp_mst_get_modes, + .mode_valid = amdgpu_dm_connector_mode_valid, + .best_encoder = dm_mst_best_encoder, +}; + +static struct amdgpu_encoder * +dm_dp_create_fake_mst_encoder(struct amdgpu_connector *connector) +{ + struct drm_device *dev = connector->base.dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder; + struct drm_encoder *encoder; + const struct drm_connector_helper_funcs *connector_funcs = + connector->base.helper_private; + struct drm_encoder *enc_master = + connector_funcs->best_encoder(&connector->base); + + DRM_DEBUG_KMS("enc master is %p\n", enc_master); + amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return NULL; + + encoder = 
&amdgpu_encoder->base; + encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); + + drm_encoder_init( + dev, + &amdgpu_encoder->base, + NULL, + DRM_MODE_ENCODER_DPMST, + NULL); + + drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); + + return amdgpu_encoder; +} + +static struct drm_connector *dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + const char *pathprop) +{ + struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_connector *aconnector; + struct drm_connector *connector; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + aconnector = to_amdgpu_connector(connector); + if (aconnector->mst_port == master + && !aconnector->port) { + DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", + aconnector, connector->base.id, aconnector->mst_port); + + aconnector->port = port; + drm_mode_connector_set_path_property(connector, pathprop); + + drm_modeset_unlock(&dev->mode_config.connection_mutex); + return &aconnector->base; + } + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); + if (!aconnector) + return NULL; + + connector = &aconnector->base; + aconnector->port = port; + aconnector->mst_port = master; + + if (drm_connector_init( + dev, + connector, + &dm_dp_mst_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort)) { + kfree(aconnector); + return NULL; + } + drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs); + + amdgpu_dm_connector_init_helper( + &adev->dm, + aconnector, + DRM_MODE_CONNECTOR_DisplayPort, + master->dc_link, + master->connector_id); + + aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); + + /* + * TODO: understand why this one is needed + */ + drm_object_attach_property( + &connector->base, + dev->mode_config.path_property, + 0); + drm_object_attach_property( + &connector->base, + dev->mode_config.tile_property, + 0); + + drm_mode_connector_set_path_property(connector, pathprop); + + /* + * Initialize connector state before adding the connectror to drm and + * framebuffer lists + */ + amdgpu_dm_connector_funcs_reset(connector); + + DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", + aconnector, connector->base.id, aconnector->mst_port); + + DRM_DEBUG_KMS(":%d\n", connector->base.id); + + return connector; +} + +static void dm_dp_destroy_mst_connector( + struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); + + DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", + aconnector, connector->base.id, aconnector->mst_port); + + aconnector->port = NULL; + if (aconnector->dc_sink) { + dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + aconnector->dc_sink = NULL; + } +} + +static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) +{ + struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; + + schedule_work(&adev->dm.mst_hotplug_work); +} + +static void dm_dp_mst_register_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = 
dev->dev_private; + int i; + + drm_modeset_lock_all(dev); + if (adev->mode_info.rfbdev) { + /*Do not add if already registered in past*/ + for (i = 0; i < adev->mode_info.rfbdev->helper.connector_count; i++) { + if (adev->mode_info.rfbdev->helper.connector_info[i]->connector + == connector) { + drm_modeset_unlock_all(dev); + return; + } + } + + drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); + } + else + DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); + + drm_modeset_unlock_all(dev); + + drm_connector_register(connector); + +} + +struct drm_dp_mst_topology_cbs dm_mst_cbs = { + .add_connector = dm_dp_add_mst_connector, + .destroy_connector = dm_dp_destroy_mst_connector, + .hotplug = dm_dp_mst_hotplug, + .register_connector = dm_dp_mst_register_connector +}; + + +void amdgpu_dm_initialize_mst_connector( + struct amdgpu_display_manager *dm, + struct amdgpu_connector *aconnector) +{ + aconnector->dm_dp_aux.aux.name = "dmdc"; + aconnector->dm_dp_aux.aux.dev = dm->adev->dev; + aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; + aconnector->dm_dp_aux.link_index = aconnector->connector_id; + + drm_dp_aux_register(&aconnector->dm_dp_aux.aux); + aconnector->mst_mgr.cbs = &dm_mst_cbs; + drm_dp_mst_topology_mgr_init( + &aconnector->mst_mgr, + dm->adev->dev, + &aconnector->dm_dp_aux.aux, + 16, + 4, + aconnector->connector_id); +} + diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h new file mode 100644 index 000000000000..6130d62ac65c --- /dev/null +++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h @@ -0,0 +1,36 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__ +#define __DAL_AMDGPU_DM_MST_TYPES_H__ + +struct amdgpu_display_manager; +struct amdgpu_connector; + +void amdgpu_dm_initialize_mst_connector( + struct amdgpu_display_manager *dm, + struct amdgpu_connector *aconnector); + +#endif diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_services.c new file mode 100644 index 000000000000..b1552700d9e0 --- /dev/null +++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_services.c @@ -0,0 +1,457 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/string.h> +#include <linux/acpi.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/amdgpu_drm.h> +#include "dm_services.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_irq.h" +#include "amdgpu_dm_types.h" +#include "amdgpu_pm.h" + +/* +#include "logger_interface.h" +#include "acpimethod_atif.h" +#include "amdgpu_powerplay.h" +#include "amdgpu_notifications.h" +*/ + +/* if the pointer is not NULL, the allocated memory is zeroed */ +void *dm_alloc(struct dc_context *ctx, uint32_t size) +{ + return kzalloc(size, GFP_KERNEL); +} + +/* Reallocate memory. The contents will remain unchanged.*/ +void *dm_realloc(struct dc_context *ctx, const void *ptr, uint32_t size) +{ + return krealloc(ptr, size, GFP_KERNEL); +} + +void dm_memmove(void *dst, const void *src, uint32_t size) +{ + memmove(dst, src, size); +} + +void dm_free(struct dc_context *ctx, void *p) +{ + kfree(p); +} + +void dm_memset(void *p, int32_t c, uint32_t count) +{ + memset(p, c, count); +} + +int32_t dm_memcmp(const void *p1, const void *p2, uint32_t count) +{ + return memcmp(p1, p2, count); +} + +int32_t dm_strncmp(const int8_t *p1, const int8_t *p2, uint32_t count) +{ + return strncmp(p1, p2, count); +} + +void dm_sleep_in_milliseconds(struct dc_context *ctx, uint32_t milliseconds) +{ + if (milliseconds >= 20) + msleep(milliseconds); + else + usleep_range(milliseconds*1000, milliseconds*1000+1); +} + +void dal_delay_in_nanoseconds(uint32_t nanoseconds) +{ + ndelay(nanoseconds); +} + +void dm_delay_in_microseconds(struct dc_context *ctx, uint32_t microseconds) +{ + udelay(microseconds); +} + +/****************************************************************************** + * IRQ Interfaces. + *****************************************************************************/ + +void dal_register_timer_interrupt( + struct dc_context *ctx, + struct dc_timer_interrupt_params *int_params, + interrupt_handler ih, + void *args) +{ + struct amdgpu_device *adev = ctx->driver_context; + + if (!adev || !int_params) { + DRM_ERROR("DM_IRQ: invalid input!\n"); + return; + } + + if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) { + /* only low irq ctx is supported. 
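+		 * Timer handlers always run from the workqueue, never
+		 * directly in interrupt context.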
*/ + DRM_ERROR("DM_IRQ: invalid context: %d!\n", + int_params->int_context); + return; + } + + amdgpu_dm_irq_register_timer(adev, int_params, ih, args); +} + +void dal_isr_acquire_lock(struct dc_context *ctx) +{ + /*TODO*/ +} + +void dal_isr_release_lock(struct dc_context *ctx) +{ + /*TODO*/ +} + +/****************************************************************************** + * End-of-IRQ Interfaces. + *****************************************************************************/ + +bool dm_get_platform_info(struct dc_context *ctx, + struct platform_info_params *params) +{ + /*TODO*/ + return false; +} + +/**** power component interfaces ****/ + +bool dm_pp_pre_dce_clock_change( + struct dc_context *ctx, + struct dal_to_power_info *input, + struct power_to_dal_info *output) +{ + /*TODO*/ + return false; +} + +bool dm_pp_apply_safe_state( + const struct dc_context *ctx) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = ctx->driver_context; + + if (adev->pm.dpm_enabled) { + /* TODO: Does this require PreModeChange event to PPLIB? */ + } + + return true; +#else + return false; +#endif +} + +bool dm_pp_apply_display_requirements( + const struct dc_context *ctx, + const struct dc_pp_display_configuration *pp_display_cfg) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = ctx->driver_context; + + if (adev->pm.dpm_enabled) { + + memset(&adev->pm.pm_display_cfg, 0, + sizeof(adev->pm.pm_display_cfg)); + + adev->pm.pm_display_cfg.cpu_cc6_disable = + pp_display_cfg->cpu_cc6_disable; + + adev->pm.pm_display_cfg.cpu_pstate_disable = + pp_display_cfg->cpu_pstate_disable; + + adev->pm.pm_display_cfg.cpu_pstate_separation_time = + pp_display_cfg->cpu_pstate_separation_time; + + adev->pm.pm_display_cfg.nb_pstate_switch_disable = + pp_display_cfg->nb_pstate_switch_disable; + + adev->pm.pm_display_cfg.num_display = + pp_display_cfg->display_count; + adev->pm.pm_display_cfg.num_path_including_non_display = + pp_display_cfg->display_count; + + adev->pm.pm_display_cfg.min_core_set_clock = + pp_display_cfg->min_engine_clock_khz/10; + adev->pm.pm_display_cfg.min_core_set_clock_in_sr = + pp_display_cfg->min_engine_clock_deep_sleep_khz/10; + adev->pm.pm_display_cfg.min_mem_set_clock = + pp_display_cfg->min_memory_clock_khz/10; + + adev->pm.pm_display_cfg.multi_monitor_in_sync = + pp_display_cfg->all_displays_in_sync; + adev->pm.pm_display_cfg.min_vblank_time = + pp_display_cfg->avail_mclk_switch_time_us; + + adev->pm.pm_display_cfg.display_clk = + pp_display_cfg->disp_clk_khz/10; + + adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency = + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us; + + adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index; + adev->pm.pm_display_cfg.line_time_in_us = + pp_display_cfg->line_time_in_us; + + adev->pm.pm_display_cfg.crossfire_display_index = -1; + adev->pm.pm_display_cfg.min_bus_bandwidth = 0; + + /* TODO: complete implementation of + * amd_powerplay_display_configuration_change(). + * Follow example of: + * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c + * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */ + amd_powerplay_display_configuration_change( + adev->powerplay.pp_handle, + &adev->pm.pm_display_cfg); + + /* TODO: replace by a separate call to 'apply display cfg'? 
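+		 * For now amdgpu_pm_compute_clocks() re-evaluates clocks as
+		 * a side effect of the display configuration change.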
*/ + amdgpu_pm_compute_clocks(adev); + } + + return true; +#else + return false; +#endif +} + +bool dc_service_get_system_clocks_range( + const struct dc_context *ctx, + struct dal_system_clock_range *sys_clks) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = ctx->driver_context; +#endif + + /* Default values, in case PPLib is not compiled-in. */ + sys_clks->max_mclk = 80000; + sys_clks->min_mclk = 80000; + + sys_clks->max_sclk = 60000; + sys_clks->min_sclk = 30000; + +#ifdef CONFIG_DRM_AMD_POWERPLAY + if (adev->pm.dpm_enabled) { + sys_clks->max_mclk = amdgpu_dpm_get_mclk(adev, false); + sys_clks->min_mclk = amdgpu_dpm_get_mclk(adev, true); + + sys_clks->max_sclk = amdgpu_dpm_get_sclk(adev, false); + sys_clks->min_sclk = amdgpu_dpm_get_sclk(adev, true); + } +#endif + + return true; +} + +static void get_default_clock_levels( + enum dc_pp_clock_type clk_type, + struct dc_pp_clock_levels *clks) +{ + uint32_t disp_clks_in_khz[6] = { + 300000, 400000, 496560, 626090, 685720, 757900 }; + uint32_t sclks_in_khz[6] = { + 300000, 360000, 423530, 514290, 626090, 720000 }; + uint32_t mclks_in_khz[2] = { 333000, 800000 }; + + switch (clk_type) { + case DC_PP_CLOCK_TYPE_DISPLAY_CLK: + clks->num_levels = 6; + dm_memmove(clks->clocks_in_khz, disp_clks_in_khz, + sizeof(disp_clks_in_khz)); + break; + case DC_PP_CLOCK_TYPE_ENGINE_CLK: + clks->num_levels = 6; + dm_memmove(clks->clocks_in_khz, sclks_in_khz, + sizeof(sclks_in_khz)); + break; + case DC_PP_CLOCK_TYPE_MEMORY_CLK: + clks->num_levels = 2; + dm_memmove(clks->clocks_in_khz, mclks_in_khz, + sizeof(mclks_in_khz)); + break; + default: + clks->num_levels = 0; + break; + } +} + +#ifdef CONFIG_DRM_AMD_POWERPLAY +static enum amd_pp_clock_type dc_to_pp_clock_type( + enum dc_pp_clock_type dc_pp_clk_type) +{ + enum amd_pp_clock_type amd_pp_clk_type = 0; + + switch (dc_pp_clk_type) { + case DC_PP_CLOCK_TYPE_DISPLAY_CLK: + amd_pp_clk_type = amd_pp_disp_clock; + break; + case DC_PP_CLOCK_TYPE_ENGINE_CLK: + amd_pp_clk_type = amd_pp_sys_clock; + break; + case DC_PP_CLOCK_TYPE_MEMORY_CLK: + amd_pp_clk_type = amd_pp_mem_clock; + break; + default: + DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", + dc_pp_clk_type); + break; + } + + return amd_pp_clk_type; +} + +static void pp_to_dc_clock_levels( + const struct amd_pp_clocks *pp_clks, + struct dc_pp_clock_levels *dc_clks, + enum dc_pp_clock_type dc_clk_type) +{ + uint32_t i; + + if (pp_clks->count > DC_PP_MAX_CLOCK_LEVELS) { + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), + pp_clks->count, + DC_PP_MAX_CLOCK_LEVELS); + + dc_clks->num_levels = DC_PP_MAX_CLOCK_LEVELS; + } else + dc_clks->num_levels = pp_clks->count; + + DRM_INFO("DM_PPLIB: values for %s clock\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); + + for (i = 0; i < dc_clks->num_levels; i++) { + DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]); + /* translate 10kHz to kHz */ + dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10; + } +} +#endif + +bool dm_pp_get_clock_levels_by_type( + const struct dc_context *ctx, + enum dc_pp_clock_type clk_type, + struct dc_pp_clock_levels *dc_clks) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + struct amd_pp_clocks pp_clks = { 0 }; + struct amd_pp_simple_clock_info validation_clks = { 0 }; + uint32_t i; + + if (amd_powerplay_get_clock_by_type(pp_handle, + dc_to_pp_clock_type(clk_type), &pp_clks)) { + /* Error in pplib. Provide default values. 
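+		 * (get_default_clock_levels() returns a fixed table of
+		 * display, engine and memory clock levels.)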
 */
+		get_default_clock_levels(clk_type, dc_clks);
+		return true;
+	}
+
+	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+	if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
+			&validation_clks)) {
+		/* Error in pplib. Provide default values. */
+		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+		validation_clks.engine_max_clock = 72000;
+		validation_clks.memory_max_clock = 80000;
+		validation_clks.level = 0;
+	}
+
+	DRM_INFO("DM_PPLIB: Validation clocks:\n");
+	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
+			validation_clks.engine_max_clock);
+	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
+			validation_clks.memory_max_clock);
+	DRM_INFO("DM_PPLIB:    level           : %d\n",
+			validation_clks.level);
+
+	/* Translate 10 kHz to kHz. */
+	validation_clks.engine_max_clock *= 10;
+	validation_clks.memory_max_clock *= 10;
+
+	/* Determine the highest non-boosted level from the validation clocks */
+	if (clk_type == DC_PP_CLOCK_TYPE_ENGINE_CLK) {
+		for (i = 0; i < dc_clks->num_levels; i++) {
+			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+				/* This clock is higher than the validation
+				 * clock. That means the previous one is the
+				 * highest non-boosted one. */
+				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+						dc_clks->num_levels, i + 1);
+				dc_clks->num_levels = i;
+				break;
+			}
+		}
+	} else if (clk_type == DC_PP_CLOCK_TYPE_MEMORY_CLK) {
+		for (i = 0; i < dc_clks->num_levels; i++) {
+			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+						dc_clks->num_levels, i + 1);
+				dc_clks->num_levels = i;
+				break;
+			}
+		}
+	}
+#else
+	get_default_clock_levels(clk_type, dc_clks);
+#endif
+	return true;
+}
+
+/**** end of power component interfaces ****/
+
+
+/* Calls to notification */
+
+void dal_notify_setmode_complete(struct dc_context *ctx,
+	uint32_t h_total,
+	uint32_t v_total,
+	uint32_t h_active,
+	uint32_t v_active,
+	uint32_t pix_clk_in_khz)
+{
+	/*TODO*/
+}
+/* End of calls to notification */
+
+long dm_get_pid(void)
+{
+	return current->pid;
+}
+
+long dm_get_tgid(void)
+{
+	return current->tgid;
+}
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c
new file mode 100644
index 000000000000..7643f751fcc6
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c
@@ -0,0 +1,2577 @@
+/*
+ * Copyright 2012-13 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/types.h> +#include <linux/version.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_atomic.h> + +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "dm_services_types.h" + +// We need to #undef FRAME_SIZE and DEPRECATED because they conflict +// with ptrace-abi.h's #define's of them. +#undef FRAME_SIZE +#undef DEPRECATED + +#include "dc.h" + +#include "amdgpu_dm_types.h" +#include "amdgpu_dm_mst_types.h" + +struct dm_connector_state { + struct drm_connector_state base; + + enum amdgpu_rmx_type scaling; + uint8_t underscan_vborder; + uint8_t underscan_hborder; + bool underscan_enable; +}; + +#define to_dm_connector_state(x)\ + container_of((x), struct dm_connector_state, base) + +#define AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET 1 +#define MAX_TARGET_NUM 6 + +void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { + .destroy = amdgpu_dm_encoder_destroy, +}; + +static void dm_set_cursor( + struct amdgpu_crtc *amdgpu_crtc, + uint64_t gpu_addr, + uint32_t width, + uint32_t height) +{ + struct dc_cursor_attributes attributes; + amdgpu_crtc->cursor_width = width; + amdgpu_crtc->cursor_height = height; + + attributes.address.high_part = upper_32_bits(gpu_addr); + attributes.address.low_part = lower_32_bits(gpu_addr); + attributes.width = width-1; + attributes.height = height-1; + attributes.x_hot = 0; + attributes.y_hot = 0; + attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; + attributes.rotation_angle = 0; + attributes.attribute_flags.value = 0; + + if (!dc_target_set_cursor_attributes( + amdgpu_crtc->target, + &attributes)) { + DRM_ERROR("DC failed to set cursor attributes\n"); + } +} + +static int dm_crtc_unpin_cursor_bo_old( + struct amdgpu_crtc *amdgpu_crtc) +{ + struct amdgpu_bo *robj; + int ret = 0; + + if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) { + robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + + ret = amdgpu_bo_reserve(robj, false); + + if (likely(ret == 0)) { + ret = amdgpu_bo_unpin(robj); + + if (unlikely(ret != 0)) { + DRM_ERROR( + "%s: unpin failed (ret=%d), bo %p\n", + __func__, + ret, + amdgpu_crtc->cursor_bo); + } + + amdgpu_bo_unreserve(robj); + } else { + DRM_ERROR( + "%s: reserve failed (ret=%d), bo %p\n", + __func__, + ret, + amdgpu_crtc->cursor_bo); + } + + drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + amdgpu_crtc->cursor_bo = NULL; + } + + return ret; +} + +static int dm_crtc_pin_cursor_bo_new( + struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + struct amdgpu_bo **ret_obj) +{ + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_bo *robj; + struct drm_gem_object *obj; + int ret = -EINVAL; + + if (NULL != crtc) { + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + uint64_t gpu_addr; + + amdgpu_crtc = to_amdgpu_crtc(crtc); + + obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + + if (!obj) { + DRM_ERROR( + "Cannot find cursor object %x for crtc %d\n", + handle, + amdgpu_crtc->crtc_id); + goto release; + } + robj = gem_to_amdgpu_bo(obj); + + ret 
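/*
+ * The usual TTM buffer-object protocol applies here, as in
+ * dm_crtc_unpin_cursor_bo_old() above: a bo must be reserved before it
+ * can be pinned or unpinned, and the reservation is dropped afterwards.
+ * A minimal sketch of that pattern (error handling elided; domain, limit
+ * and addr stand in for the arguments used below):
+ *
+ *	if (amdgpu_bo_reserve(robj, false) == 0) {
+ *		ret = amdgpu_bo_pin_restricted(robj, domain, 0, limit, &addr);
+ *		amdgpu_bo_unreserve(robj);
+ *	}
+ */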
= amdgpu_bo_reserve(robj, false);
+
+ if (unlikely(ret != 0)) {
+ drm_gem_object_unreference_unlocked(obj);
+ DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
+ ret, handle);
+ goto release;
+ }
+
+ ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
+ adev->mc.visible_vram_size,
+ &gpu_addr);
+
+ if (ret == 0) {
+ amdgpu_crtc->cursor_addr = gpu_addr;
+ *ret_obj = robj;
+ }
+ amdgpu_bo_unreserve(robj);
+ if (ret)
+ drm_gem_object_unreference_unlocked(obj);
+
+ }
+release:
+
+ return ret;
+}
+
+static int dm_crtc_cursor_set(
+ struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct amdgpu_bo *new_cursor_bo;
+ struct dc_cursor_position position;
+
+ int ret;
+
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ ret = -EINVAL;
+ new_cursor_bo = NULL;
+
+ DRM_DEBUG_KMS(
+ "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ handle,
+ width,
+ height,
+ amdgpu_crtc->cursor_bo);
+
+ if (!handle) {
+ /* turn off cursor */
+ position.enable = false;
+ position.x = 0;
+ position.y = 0;
+ position.hot_spot_enable = false;
+
+ if (amdgpu_crtc->target) {
+ /* set cursor visible false */
+ dc_target_set_cursor_position(
+ amdgpu_crtc->target,
+ &position);
+ }
+ /* unpin the old cursor buffer and update the cache */
+ ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
+ goto release;
+
+ }
+
+ if ((width > amdgpu_crtc->max_cursor_width) ||
+ (height > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR(
+ "%s: bad cursor width or height %d x %d\n",
+ __func__,
+ width,
+ height);
+ goto release;
+ }
+ /* try to pin the new cursor bo */
+ ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
+ /* if the mapping was not successful, return an error */
+ if (ret)
+ goto release;
+
+ /* program the new cursor bo to hardware */
+ dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);
+
+ /* unmap the old, no longer used cursor bo,
+ * returning its memory and mapping */
+ dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
+
+ /* assign the new cursor bo to our internal cache */
+ amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;
+
+release:
+ return ret;
+
+}
+
+static int dm_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int xorigin = 0, yorigin = 0;
+ struct dc_cursor_position position;
+
+ /* avivo cursors are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+
+ /*
+ * TODO: for cursor debugging unguard the following
+ */
+#if 0
+ DRM_DEBUG_KMS(
+ "%s: x %d y %d c->x %d c->y %d\n",
+ __func__,
+ x,
+ y,
+ crtc->x,
+ crtc->y);
+#endif
+
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+
+ position.enable = true;
+ position.x = x;
+ position.y = y;
+
+ position.hot_spot_enable = true;
+ position.x_origin = xorigin;
+ position.y_origin = yorigin;
+
+ if (!dc_target_set_cursor_position(
+ amdgpu_crtc->target,
+ &position)) {
+ DRM_ERROR("DC failed to set cursor position\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ DRM_DEBUG_KMS(
+ "%s: with cursor_bo %p\n",
+ __func__,
+ amdgpu_crtc->cursor_bo);
+
+ if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) {
+ dm_set_cursor(
+ amdgpu_crtc,
+ amdgpu_crtc->cursor_addr,
+ amdgpu_crtc->cursor_width,
+ amdgpu_crtc->cursor_height);
+ }
+}
+
+static bool fill_rects_from_plane_state(
+ const struct drm_plane_state *state,
+ struct dc_surface *surface)
+{
+ surface->src_rect.x = state->src_x >> 16;
+ surface->src_rect.y = state->src_y >> 16;
+ /* for now we ignore the mantissa and do not deal with floating pixels :( */
+ surface->src_rect.width = state->src_w >> 16;
+
+ if (surface->src_rect.width == 0)
+ return false;
+
+ surface->src_rect.height = state->src_h >> 16;
+ if (surface->src_rect.height == 0)
+ return false;
+
+ surface->dst_rect.x = state->crtc_x;
+ surface->dst_rect.y = state->crtc_y;
+
+ if (state->crtc_w == 0)
+ return false;
+
+ surface->dst_rect.width = state->crtc_w;
+
+ if (state->crtc_h == 0)
+ return false;
+
+ surface->dst_rect.height = state->crtc_h;
+
+ surface->clip_rect = surface->dst_rect;
+
+ switch (state->rotation) {
+ case BIT(DRM_ROTATE_0):
+ surface->rotation = ROTATION_ANGLE_0;
+ break;
+ case BIT(DRM_ROTATE_90):
+ surface->rotation = ROTATION_ANGLE_90;
+ break;
+ case BIT(DRM_ROTATE_180):
+ surface->rotation = ROTATION_ANGLE_180;
+ break;
+ case BIT(DRM_ROTATE_270):
+ surface->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ surface->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+ return true;
+}
+
+static bool get_fb_info(
+ const struct amdgpu_framebuffer *amdgpu_fb,
+ uint64_t *tiling_flags,
+ uint64_t *fb_location)
+{
+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ int r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r != 0)) {
+ DRM_ERROR("Unable to reserve buffer\n");
+ return false;
+ }
+
+ if (fb_location)
+ *fb_location = amdgpu_bo_gpu_offset(rbo);
+
+ if (tiling_flags)
+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+ amdgpu_bo_unreserve(rbo);
+
+ return true;
+}
+
+static void fill_plane_attributes_from_fb(
+ struct dc_surface *surface,
+ const struct amdgpu_framebuffer *amdgpu_fb)
+{
+ uint64_t tiling_flags;
+ uint64_t fb_location;
+ const struct drm_framebuffer *fb = &amdgpu_fb->base;
+
+ get_fb_info(
+ amdgpu_fb,
+ &tiling_flags,
+ &fb_location);
+
+ surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
+ surface->address.grph.addr.low_part = lower_32_bits(fb_location);
+ surface->address.grph.addr.high_part = upper_32_bits(fb_location);
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n", fb->bits_per_pixel);
+ return;
+ }
+
+ memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));
+
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+ unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+ /* XXX fix me for VI */
+ surface->tiling_info.num_banks = num_banks;
+ surface->tiling_info.array_mode =
+ DC_ARRAY_2D_TILED_THIN1;
+ surface->tiling_info.tile_split = tile_split;
+ surface->tiling_info.bank_width = bankw;
+ surface->tiling_info.bank_height = bankh;
+ surface->tiling_info.tile_aspect = mtaspect;
+ surface->tiling_info.tile_mode =
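/*
+ * AMDGPU_TILING_GET() above is a shift-and-mask accessor over the
+ * 64-bit tiling_flags word; assuming the uapi definition, it expands
+ * roughly to:
+ *
+ *	#define AMDGPU_TILING_GET(value, field) \
+ *		(((value) >> AMDGPU_TILING_##field##_SHIFT) & \
+ *		 AMDGPU_TILING_##field##_MASK)
+ *
+ * so bankw/bankh/mtaspect/tile_split/num_banks are just bit-fields
+ * unpacked from the flags the bo was created with.
+ */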
DC_ADDR_SURF_MICRO_TILING_DISPLAY; + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) + == DC_ARRAY_1D_TILED_THIN1) { + surface->tiling_info.array_mode = DC_ARRAY_1D_TILED_THIN1; + } + + surface->tiling_info.pipe_config = + AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); + + surface->plane_size.grph.surface_size.x = 0; + surface->plane_size.grph.surface_size.y = 0; + surface->plane_size.grph.surface_size.width = fb->width; + surface->plane_size.grph.surface_size.height = fb->height; + surface->plane_size.grph.surface_pitch = + fb->pitches[0] / (fb->bits_per_pixel / 8); + + surface->visible = true; + surface->scaling_quality.h_taps_c = 2; + surface->scaling_quality.v_taps_c = 2; + + /* TODO: unhardcode */ + surface->colorimetry.limited_range = false; + surface->colorimetry.color_space = SURFACE_COLOR_SPACE_SRGB; + surface->scaling_quality.h_taps = 2; + surface->scaling_quality.v_taps = 2; + surface->stereo_format = PLANE_STEREO_FORMAT_NONE; + +} + +#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256 + +static void fill_gamma_from_crtc( + const struct drm_crtc *crtc, + struct dc_surface *dc_surface) +{ + int i; + struct gamma_ramp *gamma; + uint16_t *red, *green, *blue; + int end = (crtc->gamma_size > NUM_OF_RAW_GAMMA_RAMP_RGB_256) ? + NUM_OF_RAW_GAMMA_RAMP_RGB_256 : crtc->gamma_size; + + red = crtc->gamma_store; + green = red + crtc->gamma_size; + blue = green + crtc->gamma_size; + + gamma = &dc_surface->gamma_correction; + + for (i = 0; i < end; i++) { + gamma->gamma_ramp_rgb256x3x16.red[i] = + (unsigned short) red[i]; + gamma->gamma_ramp_rgb256x3x16.green[i] = + (unsigned short) green[i]; + gamma->gamma_ramp_rgb256x3x16.blue[i] = + (unsigned short) blue[i]; + } + + gamma->type = GAMMA_RAMP_RBG256X3X16; + gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16); +} + +static void fill_plane_attributes( + struct dc_surface *surface, + struct drm_plane_state *state) +{ + const struct amdgpu_framebuffer *amdgpu_fb = + to_amdgpu_framebuffer(state->fb); + const struct drm_crtc *crtc = state->crtc; + + fill_rects_from_plane_state(state, surface); + fill_plane_attributes_from_fb( + surface, + amdgpu_fb); + + /* In case of gamma set, update gamma value */ + if (crtc->mode.private_flags & + AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET) { + fill_gamma_from_crtc(crtc, surface); + } +} + +/*****************************************************************************/ + +struct amdgpu_connector *aconnector_from_drm_crtc_id( + const struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct amdgpu_connector *aconnector; + + list_for_each_entry(connector, + &dev->mode_config.connector_list, head) { + + aconnector = to_amdgpu_connector(connector); + + if (aconnector->base.state->crtc != &acrtc->base) + continue; + + /* Found the connector */ + return aconnector; + } + + /* If we get here, not found. 
*/
+ return NULL;
+}
+
+static void calculate_stream_scaling_settings(
+ const struct drm_display_mode *mode,
+ const struct dc_stream *stream,
+ struct dm_connector_state *dm_state)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+ struct rect src = { 0 }; /* viewport in target space */
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+
+ dc_update_stream(stream, &src, &dst);
+
+ DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static void dm_dc_surface_commit(
+ struct dc *dc,
+ struct drm_crtc *crtc,
+ struct dm_connector_state *dm_state)
+{
+ struct dc_surface *dc_surface;
+ const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct dc_target *dc_target = acrtc->target;
+
+ if (!dc_target) {
+ dm_error(
+ "%s: Failed to obtain target on crtc (%d)!\n",
+ __func__,
+ acrtc->crtc_id);
+ goto fail;
+ }
+
+ dc_surface = dc_create_surface(dc);
+
+ if (!dc_surface) {
+ dm_error(
+ "%s: Failed to create a surface!\n",
+ __func__);
+ goto fail;
+ }
+
+ calculate_stream_scaling_settings(&crtc->state->mode,
+ dc_target->streams[0],
+ dm_state);
+
+ /* Surface programming */
+ fill_plane_attributes(dc_surface, crtc->primary->state);
+ if (crtc->mode.private_flags &
+ AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET) {
+ /* reset the gamma trigger */
+ crtc->mode.private_flags &=
+ ~AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET;
+ }
+
+ if (false == dc_commit_surfaces_to_target(
+ dc,
+ &dc_surface,
+ 1,
+ dc_target)) {
+ dm_error(
+ "%s: Failed to attach surface!\n",
+ __func__);
+ }
+
+ dc_surface_release(dc_surface);
+fail:
+ return;
+}
+
+static enum dc_color_depth convert_color_depth_from_display_info(
+ const struct drm_connector *connector)
+{
+ uint32_t bpc = connector->display_info.bpc;
+
+ /* Limit color depth to 8 bit.
+ * TODO: Still need to handle deep color */
+ if (bpc > 8)
+ bpc = 8;
+
+ switch (bpc) {
+ case 0:
+ /* Temporary workaround: DRM doesn't parse the color depth
+ * for EDID revisions before 1.4
+ * TODO: Fix edid parsing
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
+static enum dc_aspect_ratio get_aspect_ratio(
+ const struct drm_display_mode *mode_in)
+{
+ int32_t width = mode_in->crtc_hdisplay * 9;
+ int32_t height = mode_in->crtc_vdisplay * 16;
+
+ if ((width - height) < 10 && (width - height) > -10)
+ return
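/*
+ * Worked example: a 1920x1080 mode gives width = 1920 * 9 = 17280 and
+ * height = 1080 * 16 = 17280, difference 0, so 16:9 is returned; a
+ * 1024x768 mode gives 9216 vs 12288, difference -3072, so it falls
+ * through to 4:3. Anything within +/-10 of an exact 16:9 ratio is
+ * treated as 16:9.
+ */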
ASPECT_RATIO_16_9; + else + return ASPECT_RATIO_4_3; +} + +/*****************************************************************************/ + +static void dc_timing_from_drm_display_mode( + struct dc_crtc_timing *timing_out, + const struct drm_display_mode *mode_in, + const struct drm_connector *connector) +{ + memset(timing_out, 0, sizeof(struct dc_crtc_timing)); + + timing_out->h_border_left = 0; + timing_out->h_border_right = 0; + timing_out->v_border_top = 0; + timing_out->v_border_bottom = 0; + /* TODO: un-hardcode */ + timing_out->pixel_encoding = PIXEL_ENCODING_RGB; + timing_out->timing_standard = TIMING_STANDARD_HDMI; + timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; + timing_out->display_color_depth = convert_color_depth_from_display_info( + connector); + timing_out->scan_type = SCANNING_TYPE_NODATA; + timing_out->hdmi_vic = 0; + timing_out->vic = drm_match_cea_mode(mode_in); + + timing_out->h_addressable = mode_in->crtc_hdisplay; + timing_out->h_total = mode_in->crtc_htotal; + timing_out->h_sync_width = + mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; + timing_out->h_front_porch = + mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; + timing_out->v_total = mode_in->crtc_vtotal; + timing_out->v_addressable = mode_in->crtc_vdisplay; + timing_out->v_front_porch = + mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; + timing_out->v_sync_width = + mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; + timing_out->pix_clk_khz = mode_in->crtc_clock; + timing_out->aspect_ratio = get_aspect_ratio(mode_in); +} + +static void fill_audio_info( + struct audio_info *audio_info, + const struct drm_connector *drm_connector, + const struct dc_sink *dc_sink) +{ + int i = 0; + int cea_revision = 0; + const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; + + audio_info->manufacture_id = edid_caps->manufacturer_id; + audio_info->product_id = edid_caps->product_id; + + cea_revision = drm_connector->display_info.cea_rev; + + while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS && + edid_caps->display_name[i]) { + audio_info->display_name[i] = edid_caps->display_name[i]; + i++; + } + + if(cea_revision >= 3) { + audio_info->mode_count = edid_caps->audio_mode_count; + + for (i = 0; i < audio_info->mode_count; ++i) { + audio_info->modes[i].format_code = + (enum audio_format_code) + (edid_caps->audio_modes[i].format_code); + audio_info->modes[i].channel_count = + edid_caps->audio_modes[i].channel_count; + audio_info->modes[i].sample_rates.all = + edid_caps->audio_modes[i].sample_rate; + audio_info->modes[i].sample_size = + edid_caps->audio_modes[i].sample_size; + } + } + + audio_info->flags.all = edid_caps->speaker_flags; + + /* TODO: We only check for the progressive mode, check for interlace mode too */ + if(drm_connector->latency_present[0]) { + audio_info->video_latency = drm_connector->video_latency[0]; + audio_info->audio_latency = drm_connector->audio_latency[0]; + } + + /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ + +} + +/*TODO: move these defines elsewhere*/ +#define DAL_MAX_CONTROLLERS 4 + +static void copy_crtc_timing_for_drm_display_mode( + const struct drm_display_mode *src_mode, + struct drm_display_mode *dst_mode) +{ + dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; + dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; + dst_mode->crtc_clock = src_mode->crtc_clock; + dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; + dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; + dst_mode->crtc_hsync_start= src_mode->crtc_hsync_start; + 
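/*
+ * For reference, dc_timing_from_drm_display_mode() above derives the
+ * porches and sync widths from the crtc_* fields. With the standard
+ * CEA 1080p60 timing (hdisplay 1920, hsync_start 2008, hsync_end 2052,
+ * htotal 2200) it computes h_front_porch = 2008 - 1920 = 88 and
+ * h_sync_width = 2052 - 2008 = 44.
+ */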
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void decide_crtc_timing_for_drm_display_mode(
+ struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+ /* no scaling and no amdgpu-inserted mode, nothing to patch */
+ }
+}
+
+
+static struct dc_target *create_target_for_sink(
+ const struct amdgpu_connector *aconnector,
+ struct drm_display_mode *drm_mode)
+{
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector *drm_connector;
+ struct dm_connector_state *dm_state;
+ struct dc_target *target = NULL;
+ struct dc_stream *stream;
+ struct drm_display_mode mode = *drm_mode;
+ bool native_mode_found = false;
+
+ if (NULL == aconnector) {
+ DRM_ERROR("aconnector is NULL!\n");
+ goto drm_connector_null;
+ }
+
+ drm_connector = &aconnector->base;
+ dm_state = to_dm_connector_state(drm_connector->state);
+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+ if (NULL == stream) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto stream_create_fail;
+ }
+
+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+ /* Search for the preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &aconnector->base.modes,
+ struct drm_display_mode,
+ head);
+ if (NULL == preferred_mode) {
+ DRM_ERROR("No preferred mode found\n");
+ goto stream_create_fail;
+ }
+
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode,
+ dm_state->scaling != RMX_OFF);
+
+ dc_timing_from_drm_display_mode(&stream->timing,
+ &mode, &aconnector->base);
+
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+ aconnector->dc_sink);
+
+ target = dc_create_target_for_streams(&stream, 1);
+ dc_stream_release(stream);
+
+ if (NULL == target) {
+ DRM_ERROR("Failed to create target with streams!\n");
+ goto target_create_fail;
+ }
+
+drm_connector_null:
+target_create_fail:
+stream_create_fail:
+ return target;
+}
+
+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *dm_crtc = to_amdgpu_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ destroy_workqueue(dm_crtc->pflip_queue);
+ kfree(crtc);
+}
+
+static void amdgpu_dm_atomic_crtc_gamma_set(
+ struct drm_crtc *crtc,
+ u16 *red,
+ u16 *green,
+ u16 *blue,
+ uint32_t start,
+ uint32_t size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property *prop = dev->mode_config.prop_crtc_id;
+
+ crtc->state->mode.private_flags |= AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET;
+
+ drm_atomic_helper_crtc_set_property(crtc, prop, 0);
+}
+
+static int dm_crtc_funcs_atomic_set_property(
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_plane_state *plane_state;
+
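/*
+ * drm_atomic_get_plane_state() returns the plane state if it is
+ * already part of this atomic state, otherwise it duplicates the
+ * plane's current state into it under the plane's modeset lock.
+ * Failures (including the -EDEADLK used for lock-contention backoff)
+ * are encoded with ERR_PTR() rather than NULL, hence the IS_ERR()
+ * checks on its result.
+ */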
+ crtc_state->planes_changed = true;
+
+ /*
+ * Bit of magic done here. We need to ensure
+ * that planes get updated after the mode is set.
+ * So, we add the primary plane to the state,
+ * and this way atomic_update will be called
+ * for it.
+ */
+ plane_state =
+ drm_atomic_get_plane_state(
+ crtc_state->state,
+ crtc->primary);
+
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ return 0;
+}
+
+/* Implemented only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .cursor_set = dm_crtc_cursor_set,
+ .cursor_move = dm_crtc_cursor_move,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .gamma_set = amdgpu_dm_atomic_crtc_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_set_property = dm_crtc_funcs_atomic_set_property
+};
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+
+ /* Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl. Which
+ * makes it a bad place for *any* MST-related activity. */
+
+ connected = (NULL != aconnector->dc_sink);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(
+ struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+
+ for_each_crtc_in_state(
+ connector_state->state,
+ crtc,
+ new_crtc_state,
+ i) {
+
+ if (crtc == connector_state->crtc) {
+ struct drm_plane_state *plane_state;
+
+ new_crtc_state->mode_changed = true;
+
+ /*
+ * Bit of magic done here. We need to ensure
+ * that planes get updated after the mode is set.
+ * So, we need to add the primary plane to the state,
+ * and this way atomic_update will be called
+ * for it.
+ */
+ plane_state =
+ drm_atomic_get_plane_state(
+ connector_state->state,
+ crtc->primary);
+
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+ }
+
+ return 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ return 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ return 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ dm_new_state->underscan_enable = val;
+
+ for_each_crtc_in_state(
+ connector_state->state,
+ crtc,
+ new_crtc_state,
+ i) {
+
+ if (crtc == connector_state->crtc) {
+ struct drm_plane_state *plane_state;
+
+ /*
+ * Bit of magic done here. We need to ensure
+ * that planes get updated after the mode is set.
+ * So, we add the primary plane to the state,
+ * and this way atomic_update will be called
+ * for it.
+ */
+ plane_state =
+ drm_atomic_get_plane_state(
+ connector_state->state,
+ crtc->primary);
+
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+ }
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+ const struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev) {
+ backlight_device_unregister(dm->backlight_dev);
+ dm->backlight_dev = NULL;
+ }
+
+ }
+#endif
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+
+ connector->state = &state->base;
+ connector->state->connector = connector;
+ }
+}
+
+struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
+ struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kzalloc(sizeof(*new_state), GFP_KERNEL);
+
+ if (new_state) {
+ *new_state = *state;
+
+ return &new_state->base;
+ }
+
+ return NULL;
+}
+
+void amdgpu_dm_connector_atomic_destroy_state(
+ struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(connector, state);
+
+ kfree(dm_state);
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
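/*
+ * The duplicate/destroy hooks above and below manage the subclassed
+ * dm_connector_state (a drm_connector_state plus the scaling and
+ * underscan fields). to_dm_connector_state() recovers the wrapper,
+ * e.g. container_of(conn_state, struct dm_connector_state, base);
+ * duplication copies the whole wrapper, and destruction tears down the
+ * base state before freeing the wrapper.
+ */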
+ .atomic_destroy_state = amdgpu_dm_connector_atomic_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property
+};
+
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ DRM_DEBUG_KMS("Finding the best encoder\n");
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+ return NULL;
+ }
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ DRM_ERROR("No encoder id\n");
+ return NULL;
+}
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+int amdgpu_dm_connector_mode_valid(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ const struct dc_sink *dc_sink =
+ to_amdgpu_connector(connector)->dc_sink;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct dc_validation_set val_set = { 0 };
+ /* TODO: Unhardcode stream count */
+ struct dc_stream *streams[1];
+ struct dc_target *target;
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+ if (NULL == dc_sink) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto stream_create_fail;
+ }
+
+ streams[0] = dc_create_stream_for_sink(dc_sink);
+
+ if (NULL == streams[0]) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto stream_create_fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+ dc_timing_from_drm_display_mode(&streams[0]->timing, mode, connector);
+
+ target = dc_create_target_for_streams(streams, 1);
+ val_set.target = target;
+
+ if (NULL == val_set.target) {
+ DRM_ERROR("Failed to create target with stream!\n");
+ goto target_create_fail;
+ }
+
+ val_set.surface_count = 0;
+ streams[0]->src.width = mode->hdisplay;
+ streams[0]->src.height = mode->vdisplay;
+ streams[0]->dst = streams[0]->src;
+
+ if (dc_validate_resources(adev->dm.dc, &val_set, 1))
+ result = MODE_OK;
+
+ dc_target_release(target);
+target_create_fail:
+ dc_stream_release(streams[0]);
+stream_create_fail:
+ /* TODO: error handling */
+ return result;
+}
+
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+ /*
+ * If a second, larger display is hotplugged in fbcon mode, its higher
+ * resolution modes will be filtered out by drm_mode_validate_size(),
+ * and those modes would be missing after the user starts lightdm.
+ * So we need to renew the modes list
+ * in get_modes call back, not just return the modes count + */ + .get_modes = get_modes, + .mode_valid = amdgpu_dm_connector_mode_valid, + .best_encoder = best_encoder +}; + +static void dm_crtc_helper_disable(struct drm_crtc *crtc) +{ +} + +static int dm_crtc_helper_atomic_check( + struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + return 0; +} + +static bool dm_crtc_helper_mode_fixup( + struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { + .disable = dm_crtc_helper_disable, + .atomic_check = dm_crtc_helper_atomic_check, + .mode_fixup = dm_crtc_helper_mode_fixup +}; + +static void dm_encoder_helper_disable(struct drm_encoder *encoder) +{ + +} + +static int dm_encoder_helper_atomic_check( + struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return 0; +} + +const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { + .disable = dm_encoder_helper_disable, + .atomic_check = dm_encoder_helper_atomic_check +}; + +static const struct drm_plane_funcs dm_plane_funcs = { + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state +}; + +static void clear_unrelated_fields(struct drm_plane_state *state) +{ + state->crtc = NULL; + state->fb = NULL; + state->state = NULL; + state->fence = NULL; +} + +static bool page_flip_needed( + const struct drm_plane_state *new_state, + const struct drm_plane_state *old_state) +{ + struct drm_plane_state old_state_tmp; + struct drm_plane_state new_state_tmp; + + struct amdgpu_framebuffer *amdgpu_fb_old; + struct amdgpu_framebuffer *amdgpu_fb_new; + + uint64_t old_tiling_flags; + uint64_t new_tiling_flags; + + if (!old_state) + return false; + + if (!old_state->fb) + return false; + + if (!new_state) + return false; + + if (!new_state->fb) + return false; + + old_state_tmp = *old_state; + new_state_tmp = *new_state; + + if (!new_state->crtc->state->event) + return false; + + amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb); + amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb); + + if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL)) + return false; + + if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL)) + return false; + + if (old_tiling_flags != new_tiling_flags) + return false; + + clear_unrelated_fields(&old_state_tmp); + clear_unrelated_fields(&new_state_tmp); + + return memcmp(&old_state_tmp, &new_state_tmp, sizeof(old_state_tmp)) == 0; +} + +static int dm_plane_helper_prepare_fb( + struct drm_plane *plane, + const struct drm_plane_state *new_state) +{ + struct drm_framebuffer *fb = new_state->fb; + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_bo *rbo; + int r; + + if (!fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(fb); + + obj = afb->obj; + rbo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL); + + amdgpu_bo_unreserve(rbo); + + if (unlikely(r != 0)) { + DRM_ERROR("Failed to pin framebuffer\n"); + return r; + } + + return 0; +} + +static void dm_plane_helper_cleanup_fb( + struct drm_plane *plane, + const struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb = old_state->fb; + struct amdgpu_bo *rbo; + struct 
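/*
+ * Note on page_flip_needed() above: it decides between a plain flip
+ * and a full surface update by value-comparing the two plane states
+ * with the unrelated fields zeroed out first; clear_unrelated_fields()
+ * nulls crtc/fb/state/fence in the local copies, so the memcmp() only
+ * sees the src/crtc geometry and rotation. Identical snapshots plus
+ * identical tiling flags mean a page flip is sufficient.
+ */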
amdgpu_framebuffer *afb; + int r; + + if (!fb) + return; + + afb = to_amdgpu_framebuffer(fb); + rbo = gem_to_amdgpu_bo(afb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } else { + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } +} + +int dm_create_validation_set_for_target(struct drm_connector *connector, + struct drm_display_mode *mode, struct dc_validation_set *val_set) +{ + int result = MODE_ERROR; + const struct dc_sink *dc_sink = + to_amdgpu_connector(connector)->dc_sink; + /* TODO: Unhardcode stream count */ + struct dc_stream *streams[1]; + struct dc_target *target; + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || + (mode->flags & DRM_MODE_FLAG_DBLSCAN)) + return result; + + if (NULL == dc_sink) { + DRM_ERROR("dc_sink is NULL!\n"); + return result; + } + + streams[0] = dc_create_stream_for_sink(dc_sink); + + if (NULL == streams[0]) { + DRM_ERROR("Failed to create stream for sink!\n"); + return result; + } + + drm_mode_set_crtcinfo(mode, 0); + dc_timing_from_drm_display_mode(&streams[0]->timing, mode, connector); + + target = dc_create_target_for_streams(streams, 1); + val_set->target = target; + + if (NULL == val_set->target) { + DRM_ERROR("Failed to create target with stream!\n"); + goto fail; + } + + streams[0]->src.width = mode->hdisplay; + streams[0]->src.height = mode->vdisplay; + streams[0]->dst = streams[0]->src; + + return MODE_OK; + +fail: + dc_stream_release(streams[0]); + return result; + +} + +static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { + .prepare_fb = dm_plane_helper_prepare_fb, + .cleanup_fb = dm_plane_helper_cleanup_fb, +}; + +/* + * TODO: these are currently initialized to rgb formats only. + * For future use cases we should either initialize them dynamically based on + * plane capabilities, or initialize this array to all formats, so internal drm + * check will succeed, and let DC to implement proper check + */ +static uint32_t rgb_formats[] = { + DRM_FORMAT_XRGB4444, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, +}; + +int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, + struct amdgpu_crtc *acrtc, + uint32_t crtc_index) +{ + int res = -ENOMEM; + + struct drm_plane *primary_plane = + kzalloc(sizeof(*primary_plane), GFP_KERNEL); + + if (!primary_plane) + goto fail_plane; + + primary_plane->format_default = true; + + res = drm_universal_plane_init( + dm->adev->ddev, + primary_plane, + 0, + &dm_plane_funcs, + rgb_formats, + ARRAY_SIZE(rgb_formats), + DRM_PLANE_TYPE_PRIMARY, + NULL); + + primary_plane->crtc = &acrtc->base; + + drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs); + + res = drm_crtc_init_with_planes( + dm->ddev, + &acrtc->base, + primary_plane, + NULL, + &amdgpu_dm_crtc_funcs, + NULL); + + if (res) + goto fail; + + drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); + + acrtc->max_cursor_width = 128; + acrtc->max_cursor_height = 128; + + acrtc->crtc_id = crtc_index; + acrtc->base.enabled = false; + + dm->adev->mode_info.crtcs[crtc_index] = acrtc; + drm_mode_crtc_set_gamma_size(&acrtc->base, 256); + + acrtc->pflip_queue = + create_singlethread_workqueue("amdgpu-pageflip-queue"); + + return 0; +fail: + kfree(primary_plane); +fail_plane: + acrtc->crtc_id = -1; + return res; +} + +static int to_drm_connector_type(enum signal_type st) +{ + switch (st) { + case SIGNAL_TYPE_HDMI_TYPE_A: 
+ return DRM_MODE_CONNECTOR_HDMIA; + case SIGNAL_TYPE_EDP: + return DRM_MODE_CONNECTOR_eDP; + case SIGNAL_TYPE_RGB: + return DRM_MODE_CONNECTOR_VGA; + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + return DRM_MODE_CONNECTOR_DisplayPort; + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_DVI_SINGLE_LINK: + return DRM_MODE_CONNECTOR_DVID; + case SIGNAL_TYPE_VIRTUAL: + return DRM_MODE_CONNECTOR_VIRTUAL; + + default: + return DRM_MODE_CONNECTOR_Unknown; + } +} + +static void amdgpu_dm_get_native_mode(struct drm_connector *connector) +{ + const struct drm_connector_helper_funcs *helper = + connector->helper_private; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + encoder = helper->best_encoder(connector); + + if (encoder == NULL) + return; + + amdgpu_encoder = to_amdgpu_encoder(encoder); + + amdgpu_encoder->native_mode.clock = 0; + + if (!list_empty(&connector->probed_modes)) { + struct drm_display_mode *preferred_mode = NULL; + list_for_each_entry(preferred_mode, + &connector->probed_modes, + head) { + if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { + amdgpu_encoder->native_mode = *preferred_mode; + } + break; + } + + } +} + +static struct drm_display_mode *amdgpu_dm_create_common_mode( + struct drm_encoder *encoder, char *name, + int hdisplay, int vdisplay) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *mode = NULL; + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + + mode = drm_mode_duplicate(dev, native_mode); + + if(mode == NULL) + return NULL; + + mode->hdisplay = hdisplay; + mode->vdisplay = vdisplay; + mode->type &= ~DRM_MODE_TYPE_PREFERRED; + strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); + + return mode; + +} + +static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *mode = NULL; + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + struct amdgpu_connector *amdgpu_connector = + to_amdgpu_connector(connector); + int i; + int n; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + }common_modes[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + n = sizeof(common_modes) / sizeof(common_modes[0]); + + for (i = 0; i < n; i++) { + struct drm_display_mode *curmode = NULL; + bool mode_existed = false; + + if (common_modes[i].w > native_mode->hdisplay || + common_modes[i].h > native_mode->vdisplay || + (common_modes[i].w == native_mode->hdisplay && + common_modes[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(curmode, &connector->probed_modes, head) { + if (common_modes[i].w == curmode->hdisplay && + common_modes[i].h == curmode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = amdgpu_dm_create_common_mode(encoder, + common_modes[i].name, common_modes[i].w, + common_modes[i].h); + drm_mode_probed_add(connector, mode); + amdgpu_connector->num_modes++; + } +} + +static void amdgpu_dm_connector_ddc_get_modes( + struct drm_connector *connector, + struct edid *edid) +{ + struct amdgpu_connector 
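/*
+ * amdgpu_dm_connector_add_common_modes() above injects a fixed list
+ * of common resolutions, skipping any that exceed the native mode,
+ * match it exactly, or already exist in probed_modes; e.g. on a
+ * 1920x1080 native panel it would add 1280x720 but neither 1920x1200
+ * nor a duplicate 1920x1080.
+ */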
*amdgpu_connector =
+ to_amdgpu_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+ drm_edid_to_eld(connector, edid);
+
+ amdgpu_dm_get_native_mode(connector);
+ } else
+ amdgpu_connector->num_modes = 0;
+}
+
+int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct amdgpu_connector *amdgpu_connector =
+ to_amdgpu_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_connector->edid;
+
+ encoder = helper->best_encoder(connector);
+
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ return amdgpu_connector->num_modes;
+}
+
+void amdgpu_dm_connector_init_helper(
+ struct amdgpu_display_manager *dm,
+ struct amdgpu_connector *aconnector,
+ int connector_type,
+ const struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = dm->ddev->dev_private;
+
+ aconnector->connector_id = link_index;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = true;
+ aconnector->base.doublescan_allowed = true;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+
+ /* Configure HPD hot plug support. connector->polled defaults to 0,
+ * which means HPD hot plug is not supported. */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+}
+
+/* Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
*/
+int amdgpu_dm_connector_init(
+ struct amdgpu_display_manager *dm,
+ struct amdgpu_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res, connector_type;
+ struct dc *dc = dm->dc;
+ const struct dc_link *link = dc_get_link_at_index(dc, link_index);
+
+ DRM_DEBUG_KMS("%s()\n", __func__);
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ return res;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_mode_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ drm_connector_register(&aconnector->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_mst_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ /* NOTE: this currently will create a backlight device even if a panel
+ * is not connected to the eDP/LVDS connector.
+ *
+ * This is less than ideal but we don't have sink information at this
+ * stage since detection happens after. We can't do detection earlier
+ * since MST detection needs connectors to be created first.
+ */
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ /* Even if registration failed, we should continue with
+ * DM initialization because not having a backlight control
+ * is better than a black screen. */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+ dm->backlight_link = link;
+ }
+#endif
+
+ return 0;
+}
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+int amdgpu_dm_encoder_init(
+ struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+enum dm_commit_action {
+ DM_COMMIT_ACTION_NOTHING,
+ DM_COMMIT_ACTION_RESET,
+ DM_COMMIT_ACTION_DPMS_ON,
+ DM_COMMIT_ACTION_DPMS_OFF,
+ DM_COMMIT_ACTION_SET
+};
+
+static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
+{
+ /* mode changed means either the mode actually changed or enable changed */
+ /* active changed means a dpms change */
+ if (state->mode_changed) {
+ /* if it got disabled - call reset mode */
+ if (!state->enable)
+ return DM_COMMIT_ACTION_RESET;
+
+ if (state->active)
+ return DM_COMMIT_ACTION_SET;
+ else
+ return DM_COMMIT_ACTION_RESET;
+ } else {
+ /* !
mode_changed */
+
+ /* if it remains disabled - skip it */
+ if (!state->enable)
+ return DM_COMMIT_ACTION_NOTHING;
+
+ if (state->active_changed) {
+ if (state->active) {
+ return DM_COMMIT_ACTION_DPMS_ON;
+ } else {
+ return DM_COMMIT_ACTION_DPMS_OFF;
+ }
+ } else {
+ /* ! active_changed */
+ return DM_COMMIT_ACTION_NOTHING;
+ }
+ }
+}
+
+static void manage_dm_interrupts(
+ struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+ * this is not a correct translation, but it will work as long as the
+ * VBLANK constant is the same as PFLIP
+ */
+ int irq_type =
+ amdgpu_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ } else {
+ unsigned long flags;
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+
+ /*
+ * should be called here, to guarantee no work is left in the
+ * queue. As this function sleeps, it was a bug to call it
+ * inside the amdgpu_dm_flip_cleanup function under the locked
+ * event_lock
+ */
+ flush_workqueue(acrtc->pflip_queue);
+
+ /*
+ * TODO: once Vitaly's change to adjust locking in
+ * page_flip_work_func is submitted to the base driver, move
+ * the lock and check into the amdgpu_dm_flip_cleanup function
+ */
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
+ /*
+ * this is the case when, on reset, the last pending
+ * pflip interrupt did not occur. Clean up
+ */
+ amdgpu_dm_flip_cleanup(adev, acrtc);
+ }
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ }
+}
+
+/*
+ * Handle headless hotplug workaround
+ *
+ * In case of headless hotplug, if the same monitor is plugged back into the
+ * same DDI, DRM considers the mode unchanged. We should check whether the
+ * sink pointer changed, and set mode_changed properly to
+ * make sure the commit does everything.
+ */
+static void handle_headless_hotplug(
+ const struct amdgpu_crtc *acrtc,
+ struct drm_crtc_state *state,
+ struct amdgpu_connector **aconnector)
+{
+ struct amdgpu_connector *old_connector =
+ aconnector_from_drm_crtc_id(&acrtc->base);
+
+ /*
+ * TODO Revisit this. This code is kinda hacky and might break things.
+ */
+
+ if (!old_connector)
+ return;
+
+ if (!*aconnector)
+ *aconnector = old_connector;
+
+ if (acrtc->target && (*aconnector)->dc_sink) {
+ if ((*aconnector)->dc_sink !=
+ acrtc->target->streams[0]->sink) {
+ state->mode_changed = true;
+ }
+ }
+
+ if (!acrtc->target) {
+ /* In case of headless with DPMS on: when the system wakes up,
+ * if no monitor is connected, the target is NULL and no new
+ * target is created. In that condition, we should check
+ * if any connector is connected; if so,
+ * it means a hot plug happened after wake up, and
+ * mode_changed should be set to true to make sure
+ * the commit targets will do everything.
+ */
+ state->mode_changed =
+ (*aconnector)->base.status ==
+ connector_status_connected;
+ } else {
+ /* In case of headless hotplug, if the same monitor is plugged
+ * back into the same DDI, DRM considers the mode unchanged; we
+ * should check whether the sink pointer changed, and set
+ * mode_changed properly to make sure the commit does everything.
*/
+ /* check if the sink has changed since the last commit */
+ if ((*aconnector)->dc_sink && (*aconnector)->dc_sink !=
+ acrtc->target->streams[0]->sink)
+ state->mode_changed = true;
+ }
+}
+
+int amdgpu_dm_atomic_commit(
+ struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ uint32_t i, j;
+ int32_t ret;
+ uint32_t commit_targets_count = 0;
+ uint32_t new_crtcs_count = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+
+ struct dc_target *commit_targets[DAL_MAX_CONTROLLERS];
+ struct amdgpu_crtc *new_crtcs[DAL_MAX_CONTROLLERS];
+
+ /* In this step all new fbs get pinned */
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(dev, state);
+
+ /*
+ * From this point on, 'state' really is the old state. The new state
+ * has been swapped into the appropriate objects and can be accessed
+ * from there.
+ */
+
+ /*
+ * there is no fence usage in the state yet, so we can skip
+ * wait_for_fences(dev, state);
+ */
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+ /* update changed items */
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ struct amdgpu_crtc *acrtc;
+ struct amdgpu_connector *aconnector = NULL;
+ enum dm_commit_action action;
+ struct drm_crtc_state *new_state = crtc->state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state;
+
+ acrtc = to_amdgpu_crtc(crtc);
+
+ for_each_connector_in_state(
+ state,
+ connector,
+ old_con_state,
+ j) {
+ if (connector->state->crtc == crtc) {
+ aconnector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+
+ /* handles the headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
+ handle_headless_hotplug(acrtc, new_state, &aconnector);
+
+ action = get_dm_commit_action(new_state);
+
+ switch (action) {
+ case DM_COMMIT_ACTION_DPMS_ON:
+ case DM_COMMIT_ACTION_SET: {
+ struct dc_target *new_target =
+ create_target_for_sink(
+ aconnector,
+ &crtc->state->mode);
+
+ DRM_DEBUG_KMS("Atomic commit: SET.\n");
+
+ if (!new_target) {
+ /*
+ * this could happen because of issues with
+ * userspace notification delivery.
+ * In this case userspace tries to set a mode on
+ * a display which is in fact disconnected.
+ * dc_sink is NULL on the aconnector in this case.
+ * We expect a reset mode will come soon.
*
+ * This can also happen when an unplug is done
+ * during the resume sequence
+ */
+ new_state->planes_changed = false;
+ DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ break;
+ }
+
+ if (acrtc->target) {
+ /*
+ * we suspend vblanks and pflips on a crtc
+ * that is about to change
+ */
+ manage_dm_interrupts(adev, acrtc, false);
+ /* this is the update mode case */
+ dc_target_release(acrtc->target);
+ acrtc->target = NULL;
+ }
+
+ /*
+ * this saves the set-mode crtcs:
+ * we need to enable vblanks once all
+ * resources are acquired in dc after dc_commit_targets
+ */
+ new_crtcs[new_crtcs_count] = acrtc;
+ new_crtcs_count++;
+
+ acrtc->target = new_target;
+ acrtc->enabled = true;
+
+ break;
+ }
+
+ case DM_COMMIT_ACTION_NOTHING:
+ break;
+
+ case DM_COMMIT_ACTION_DPMS_OFF:
+ case DM_COMMIT_ACTION_RESET:
+ DRM_DEBUG_KMS("Atomic commit: RESET.\n");
+ /* i.e. reset mode */
+ if (acrtc->target) {
+ manage_dm_interrupts(adev, acrtc, false);
+
+ dc_target_release(acrtc->target);
+ acrtc->target = NULL;
+ acrtc->enabled = false;
+ }
+ break;
+ } /* switch() */
+ } /* for_each_crtc_in_state() */
+
+ commit_targets_count = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (acrtc->target) {
+ commit_targets[commit_targets_count] = acrtc->target;
+ ++commit_targets_count;
+ }
+ }
+
+ /* DC is optimized not to do anything if 'targets' didn't change. */
+ dc_commit_targets(dm->dc, commit_targets, commit_targets_count);
+
+ /* update planes when needed */
+ for_each_plane_in_state(state, plane, old_plane_state, i) {
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_connector *connector;
+ struct dm_connector_state *dm_state = NULL;
+ enum dm_commit_action action;
+
+ if (!fb || !crtc || !crtc->state->planes_changed ||
+ !crtc->state->active)
+ continue;
+
+ action = get_dm_commit_action(crtc->state);
+
+ /* Surfaces are created under two scenarios:
+ * 1. This commit is not a page flip.
+ * 2. This commit is a page flip, and targets are created.
+ */
+ if (!page_flip_needed(plane_state, old_plane_state) ||
+ action == DM_COMMIT_ACTION_DPMS_ON ||
+ action == DM_COMMIT_ACTION_SET) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+ if (connector->state->crtc == crtc) {
+ dm_state = to_dm_connector_state(
+ connector->state);
+ break;
+ }
+ }
+
+ /*
+ * This situation happens in the following case:
+ * we are about to get a set mode for a connector whose
+ * only possible crtc (in its encoder crtc mask) is
+ * used by another connector; that is why DRM will try
+ * to re-assign crtcs in order to make the configuration
+ * supported. For our implementation we need to make all
+ * encoders support all crtcs, then this issue will
+ * never arise again. But to guard the code from this
+ * issue, the check is left in.
+
+static uint32_t add_val_sets_surface(
+ struct dc_validation_set *val_sets,
+ uint32_t set_count,
+ const struct dc_target *target,
+ const struct dc_surface *surface)
+{
+ uint32_t i = 0;
+
+ while (i < set_count) {
+ if (val_sets[i].target == target)
+ break;
+ ++i;
+ }
+
+ val_sets[i].surfaces[val_sets[i].surface_count] = surface;
+ val_sets[i].surface_count++;
+
+ return val_sets[i].surface_count;
+}
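Note that add_val_sets_surface relies on the caller guaranteeing that the target is already present in the set and that there is room for another surface; if the target is missing, it writes one past the populated entries. A bounds-checked variant might look like the sketch below; MAX_SURFACE_NUM is assumed to come from the DC headers, and this is illustrative, not part of the patch:

    /* sketch: same lookup, but refuse to write out of bounds */
    static uint32_t add_val_sets_surface_checked(
        struct dc_validation_set *val_sets,
        uint32_t set_count,
        const struct dc_target *target,
        const struct dc_surface *surface)
    {
        uint32_t i;

        for (i = 0; i < set_count; i++)
            if (val_sets[i].target == target)
                break;

        /* target not found, or entry full: add nothing */
        if (i == set_count ||
            val_sets[i].surface_count >= MAX_SURFACE_NUM /* assumed */)
            return 0;

        val_sets[i].surfaces[val_sets[i].surface_count++] = surface;
        return val_sets[i].surface_count;
    }
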
+
+static uint32_t update_in_val_sets_target(
+ struct dc_validation_set *val_sets,
+ uint32_t set_count,
+ const struct dc_target *old_target,
+ const struct dc_target *new_target)
+{
+ uint32_t i = 0;
+
+ while (i < set_count) {
+ if (val_sets[i].target == old_target)
+ break;
+ ++i;
+ }
+
+ val_sets[i].target = new_target;
+
+ if (i == set_count) {
+ /* nothing found, add the new one at the end */
+ return set_count + 1;
+ }
+
+ return set_count;
+}
+
+static uint32_t remove_from_val_sets(
+ struct dc_validation_set *val_sets,
+ uint32_t set_count,
+ const struct dc_target *target)
+{
+ uint32_t i = 0;
+
+ while (i < set_count) {
+ if (val_sets[i].target == target)
+ break;
+ ++i;
+ }
+
+ if (i == set_count) {
+ /* nothing found */
+ return set_count;
+ }
+
+ memmove(
+ &val_sets[i],
+ &val_sets[i + 1],
+ sizeof(struct dc_validation_set) * (set_count - i - 1));
+
+ return set_count - 1;
+}
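Taken together, the three helpers above maintain the validation-set array exactly the way amdgpu_dm_atomic_check (below) needs it: seed it with the currently committed targets, swap in newly created targets, and drop targets being reset. A hypothetical usage sketch, where old_target, new_target and stale_target are placeholders:

    /* sketch: the lifecycle of the validation-set array */
    struct dc_validation_set set[MAX_TARGET_NUM] = {{ 0 }};
    uint32_t set_count = 0;

    /* seed with a currently committed target */
    set[set_count++].target = old_target;

    /* a mode set replaces the old target in place */
    set_count = update_in_val_sets_target(set, set_count,
                                          old_target, new_target);

    /* a reset removes its target from validation entirely */
    set_count = remove_from_val_sets(set, set_count, stale_target);
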
+
+int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int i;
+ int j;
+ int ret;
+ int set_count;
+ int new_target_count;
+ struct dc_validation_set set[MAX_TARGET_NUM] = {{ 0 }};
+ struct dc_target *new_targets[MAX_TARGET_NUM] = { 0 };
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ bool need_to_validate = false;
+
+ ret = drm_atomic_helper_check(dev, state);
+
+ if (ret) {
+ DRM_ERROR("Atomic state validation failed with error %d!\n",
+ ret);
+ return ret;
+ }
+
+ ret = -EINVAL;
+
+ /* copy the existing configuration */
+ new_target_count = 0;
+ set_count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (acrtc->target) {
+ set[set_count].target = acrtc->target;
+ ++set_count;
+ }
+ }
+
+ /* update changed items */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_connector *aconnector = NULL;
+ enum dm_commit_action action;
+ struct drm_connector *connector;
+ struct drm_connector_state *con_state;
+
+ acrtc = to_amdgpu_crtc(crtc);
+
+ for_each_connector_in_state(state, connector, con_state, j) {
+ if (con_state->crtc == crtc) {
+ aconnector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+
+ /*TODO:
+ handle_headless_hotplug(acrtc, crtc_state, &aconnector);*/
+
+ action = get_dm_commit_action(crtc_state);
+
+ switch (action) {
+ case DM_COMMIT_ACTION_DPMS_ON:
+ case DM_COMMIT_ACTION_SET: {
+ struct drm_display_mode mode = crtc_state->mode;
+ struct dc_target *new_target = NULL;
+
+ if (!aconnector) {
+ DRM_ERROR(
+ "%s: Can't find connector for crtc %d\n",
+ __func__,
+ acrtc->crtc_id);
+ goto connector_not_found;
+ }
+
+ new_target =
+ create_target_for_sink(
+ aconnector,
+ &mode);
+
+ /*
+ * We can have no target on ACTION_SET if a display
+ * was disconnected during S3; in this case it is not
+ * an error. The OS will be updated after detection
+ * and will do the right thing on the next atomic
+ * commit.
+ */
+ if (!new_target) {
+ DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ break;
+ }
+
+ new_targets[new_target_count] = new_target;
+
+ set_count = update_in_val_sets_target(
+ set,
+ set_count,
+ acrtc->target,
+ new_target);
+ new_target_count++;
+ need_to_validate = true;
+ break;
+ }
+
+ case DM_COMMIT_ACTION_NOTHING:
+ break;
+ case DM_COMMIT_ACTION_DPMS_OFF:
+ case DM_COMMIT_ACTION_RESET:
+ /* i.e. reset mode */
+ if (acrtc->target) {
+ set_count = remove_from_val_sets(
+ set,
+ set_count,
+ acrtc->target);
+ }
+ break;
+ }
+ }
+
+ for (i = 0; i < set_count; i++) {
+ for_each_plane_in_state(state, plane, plane_state, j) {
+ struct drm_plane_state *old_plane_state = plane->state;
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (!fb || acrtc->target != set[i].target)
+ continue;
+
+ if (!crtc->state->planes_changed)
+ continue;
+
+ if (!page_flip_needed(plane_state, old_plane_state)) {
+ struct dc_surface *surface =
+ dc_create_surface(dc);
+
+ fill_plane_attributes(
+ surface,
+ plane_state);
+
+ add_val_sets_surface(
+ set,
+ set_count,
+ acrtc->target,
+ surface);
+ need_to_validate = true;
+ }
+ }
+ }
+
+ if (need_to_validate == false || set_count == 0
+ || dc_validate_resources(dc, set, set_count))
+ ret = 0;
+
+connector_not_found:
+ for (i = 0; i < set_count; i++) {
+ for (j = 0; j < set[i].surface_count; j++) {
+ dc_surface_release(set[i].surfaces[j]);
+ }
+ }
+ for (i = 0; i < new_target_count; i++)
+ dc_target_release(new_targets[i]);
+
+ if (ret != 0)
+ DRM_ERROR("Atomic check failed.\n");
+
+ return ret;
+}
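Note that amdgpu_dm_atomic_check above must be safe to run without a subsequent commit: userspace can probe a configuration with the TEST_ONLY flag, which exercises only the check path. A libdrm sketch, for illustration; req is assumed to be an already populated atomic request:

    /* sketch: returns 0 if the configuration would be accepted,
     * without touching the hardware */
    int probe_config(int fd, drmModeAtomicReq *req)
    {
        return drmModeAtomicCommit(fd, req,
                                   DRM_MODE_ATOMIC_TEST_ONLY |
                                   DRM_MODE_ATOMIC_ALLOW_MODESET,
                                   NULL);
    }
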
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h
new file mode 100644
index 000000000000..0481075de6e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012-13 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __AMDGPU_DM_TYPES_H__
+#define __AMDGPU_DM_TYPES_H__
+
+#include <drm/drmP.h>
+
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+struct dc_validation_set;
+struct dc_surface;
+
+/* TODO (Jodan Hersen): use the one in amdgpu_dm */
+int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_crtc *amdgpu_crtc,
+ uint32_t link_index);
+int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_connector *amdgpu_connector,
+ uint32_t link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+int amdgpu_dm_encoder_init(
+ struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
+void amdgpu_dm_connector_destroy(struct drm_connector *connector);
+void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
+
+void dm_add_display_info(
+ struct drm_display_info *disp_info,
+ struct amdgpu_display_manager *dm,
+ uint32_t display_index);
+
+int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+int amdgpu_dm_atomic_commit(
+ struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+int dm_create_validation_set_for_target(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct dc_validation_set *val_set);
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
+ struct drm_connector *connector);
+void amdgpu_dm_connector_atomic_destroy_state(
+ struct drm_connector *connector,
+ struct drm_connector_state *state);
+int amdgpu_dm_connector_atomic_set_property(
+ struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(
+ struct amdgpu_display_manager *dm,
+ struct amdgpu_connector *aconnector,
+ int connector_type,
+ const struct dc_link *link,
+ int link_index);
+
+int amdgpu_dm_connector_mode_valid(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
+
+#endif /* __AMDGPU_DM_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/dal/dc/dm_services.h b/drivers/gpu/drm/amd/dal/dc/dm_services.h
index 206c0b7df7c6..47bbb9e6e060 100644
--- a/drivers/gpu/drm/amd/dal/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/dal/dc/dm_services.h
@@ -166,19 +166,7 @@ bool dm_exec_bios_cmd_table(
 uint32_t index,
 void *params);
 
-#ifdef BUILD_DAL_TEST
-uint32_t dm_bios_cmd_table_para_revision(
-struct dc_context *ctx,
- uint32_t index);
-bool dm_bios_cmd_table_revision(
- struct dc_context *ctx,
- uint32_t index,
- uint8_t *frev,
- uint8_t *crev);
-#endif
-
-#ifndef BUILD_DAL_TEST
 static inline uint32_t dm_bios_cmd_table_para_revision(
 struct dc_context *ctx,
 uint32_t index)
@@ -195,11 +183,6 @@ static inline uint32_t dm_bios_cmd_table_para_revision(
 return crev;
 }
 
-#else
-uint32_t dm_bios_cmd_table_para_revision(
- struct dc_context *ctx,
- uint32_t index);
-#endif
 
 /**************************************
 * Power Play (PP) interfaces
-- 
2.1.4