From 3261e013c0cad780a9a70176569e5ba9aba6e9b0 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Wed, 6 Nov 2019 14:23:55 -0500 Subject: drm/amd/display: Add MST atomic routines - Adding encoder atomic check to find vcpi slots for a connector - Using DRM helper functions to calculate PBN - Adding connector atomic check to release vcpi slots if connector loses CRTC - Calculate PBN and VCPI slots only once during atomic check and store them on crtc_state to eliminate redundant calculation - Call drm_dp_mst_atomic_check to verify validity of MST topology during state atomic check v2: squashed previous 3 separate patches, removed DSC PBN calculation, and added PBN and VCPI slots properties to amdgpu connector v3: - moved vcpi_slots and pbn properties to dm_crtc_state and dc_stream_state - updates stream's vcpi_slots and pbn on commit - separated patch from the DSC MST series v4: - set vcpi_slots and pbn properties to dm_connector_state - copy porperties from connector state on to crtc state v5: - keep the pbn and vcpi values only on connnector state - added a void pointer to the stream state instead on two ints, because dc_stream_state is OS agnostic. Pointer points to the current dm_connector_state. v6: - Remove new param from stream v7: - Fix error with using max capable bpc v8: - squash in fix from Gustavo A. R. Silva Reviewed-by: Lyude Paul Reviewed-by: Nicholas Kazlauskas Signed-off-by: Mikita Lipski Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 65 +++++++++++++++++++++- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 + .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 50 ++++------------- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 32 +++++++++++ 4 files changed, 108 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index caba9ecac723..6ec8473f2cbd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4095,7 +4095,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_hborder = 0; state->underscan_vborder = 0; state->base.max_requested_bpc = 8; - + state->vcpi_slots = 0; + state->pbn = 0; if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) state->abm_level = amdgpu_dm_abm_level; @@ -4123,7 +4124,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) new_state->underscan_enable = state->underscan_enable; new_state->underscan_hborder = state->underscan_hborder; new_state->underscan_vborder = state->underscan_vborder; - + new_state->vcpi_slots = state->vcpi_slots; + new_state->pbn = state->pbn; return &new_state->base; } @@ -4520,10 +4522,64 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder) } +static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) +{ + switch (display_color_depth) { + case COLOR_DEPTH_666: + return 6; + case COLOR_DEPTH_888: + return 8; + case COLOR_DEPTH_101010: + return 10; + case COLOR_DEPTH_121212: + return 12; + case COLOR_DEPTH_141414: + return 14; + case COLOR_DEPTH_161616: + return 16; + default: + break; + } + return 0; +} + static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct drm_atomic_state *state = crtc_state->state; + struct drm_connector *connector = conn_state->connector; + struct 
amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + enum dc_color_depth color_depth; + int clock, bpp = 0; + + if (!aconnector->port || !aconnector->dc_sink) + return 0; + + mst_port = aconnector->port; + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!crtc_state->connectors_changed && !crtc_state->mode_changed) + return 0; + + if (!state->duplicated) { + color_depth = convert_color_depth_from_display_info(connector, conn_state); + bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; + clock = adjusted_mode->clock; + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp); + } + dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, + mst_mgr, + mst_port, + dm_new_connector_state->pbn); + if (dm_new_connector_state->vcpi_slots < 0) { + DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); + return dm_new_connector_state->vcpi_slots; + } return 0; } @@ -7566,6 +7622,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + /* Perform validation of MST topology in the state*/ + ret = drm_dp_mst_atomic_check(state); + if (ret) + goto fail; + if (state->legacy_cursor_update) { /* * This is a fast cursor update coming from the plane update diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 77c5166e6b08..d26bb591cefe 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -359,6 +359,8 @@ struct dm_connector_state { bool underscan_enable; bool freesync_capable; uint8_t abm_level; + int vcpi_slots; + uint64_t pbn; }; #define to_dm_connector_state(x)\ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 11e5784aa62a..4882b47e85ef 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -182,15 +182,19 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( bool enable) { struct amdgpu_dm_connector *aconnector; + struct dm_connector_state *dm_conn_state; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; - int slots = 0; bool ret; - int clock; - int bpp = 0; - int pbn = 0; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + /* Accessing the connector state is required for vcpi_slots allocation + * and directly relies on behaviour in commit check + * that blocks before commit guaranteeing that the state + * is not gonna be swapped while still in use in commit tail */ + + dm_conn_state = to_dm_connector_state(aconnector->base.state); + if (!aconnector || !aconnector->mst_port) return false; @@ -203,42 +207,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( mst_port = aconnector->port; if (enable) { - clock = stream->timing.pix_clk_100hz / 10; - - switch (stream->timing.display_color_depth) { - - case COLOR_DEPTH_666: - bpp = 6; - break; - case COLOR_DEPTH_888: - bpp = 8; - break; - case COLOR_DEPTH_101010: - bpp = 10; - break; - case COLOR_DEPTH_121212: - bpp = 12; - break; - case COLOR_DEPTH_141414: - bpp = 14; - break; - case COLOR_DEPTH_161616: - bpp = 16; - break; - default: - ASSERT(bpp 
!= 0); - break; - } - - bpp = bpp * 3; - - /* TODO need to know link rate */ - - pbn = drm_dp_calc_pbn_mode(clock, bpp); - - slots = drm_dp_find_vcpi_slots(mst_mgr, pbn); - ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots); + ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, + dm_conn_state->pbn, + dm_conn_state->vcpi_slots); if (!ret) return false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 49cf39711dfc..ba508a897263 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -246,11 +246,43 @@ dm_dp_mst_detect(struct drm_connector *connector, aconnector->port); } +static int dm_dp_mst_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct drm_connector_state *new_conn_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(state, connector); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct drm_crtc_state *new_crtc_state; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + + mst_port = aconnector->port; + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!old_conn_state->crtc) + return 0; + + if (new_conn_state->crtc) { + new_crtc_state = drm_atomic_get_old_crtc_state(state, new_conn_state->crtc); + if (!new_crtc_state || + !drm_atomic_crtc_needs_modeset(new_crtc_state) || + new_crtc_state->enable) + return 0; + } + + return drm_dp_atomic_release_vcpi_slots(state, + mst_mgr, + mst_port); +} + static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, .atomic_best_encoder = dm_mst_atomic_best_encoder, .detect_ctx = dm_dp_mst_detect, + .atomic_check = dm_dp_mst_atomic_check, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) -- cgit From 320f6d81aaeed354f832a46685d5eac91006db72 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 8 Nov 2019 14:38:14 +0000 Subject: drm/amd/display: fix dereference of pointer aconnector when it is null Currently pointer aconnector is being dereferenced by the call to to_dm_connector_state before it is being null checked, this could lead to a null pointer dereference. Fix this by checking that aconnector is null before dereferencing it. 
Addresses-Coverity: ("Dereference before null check") Reviewed-by: Mikita Lipski Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 4882b47e85ef..57a226c0bc4a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -193,12 +193,11 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( * that blocks before commit guaranteeing that the state * is not gonna be swapped while still in use in commit tail */ - dm_conn_state = to_dm_connector_state(aconnector->base.state); - - if (!aconnector || !aconnector->mst_port) return false; + dm_conn_state = to_dm_connector_state(aconnector->base.state); + mst_mgr = &aconnector->mst_port->mst_mgr; if (!mst_mgr->mst_state) -- cgit From 02350f0bdf44d888b2c3284ecd9836524d930e00 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 22 Oct 2019 13:07:55 -0400 Subject: drm/amdgpu: Add ucode support for DMCUB The DMCUB is a secondary DMCU (Display MicroController Unit) that has its own separate firmware. It's required for DMCU support on Renoir. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 11 ++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 9 +++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 833fc4b68940..9ef312428231 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -447,6 +447,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, const struct common_firmware_header *header = NULL; const struct gfx_firmware_header_v1_0 *cp_hdr = NULL; const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL; + const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL; if (NULL == ucode->fw) return 0; @@ -460,6 +461,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, header = (const struct common_firmware_header *)ucode->fw->data; cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data; + dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP || (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && @@ -470,7 +472,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM && ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) { + ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV && + ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) { ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + @@ -506,6 +509,12 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, le32_to_cpu(header->ucode_array_offset_bytes) + le32_to_cpu(dmcu_hdr->intv_offset_bytes)), ucode->ucode_size); + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) { + ucode->ucode_size = 
le32_to_cpu(dmcub_hdr->inst_const_bytes); + memcpy(ucode->kaddr, + (void *)((uint8_t *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes)), + ucode->ucode_size); } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 410587b950f3..eaf2d5b9c92f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -251,6 +251,13 @@ struct dmcu_firmware_header_v1_0 { uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */ }; +/* version_major=1, version_minor=0 */ +struct dmcub_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t inst_const_bytes; /* size of instruction region, in bytes */ + uint32_t bss_data_bytes; /* size of bss/data region, in bytes */ +}; + /* header is fixed size */ union amdgpu_firmware_header { struct common_firmware_header common; @@ -268,6 +275,7 @@ union amdgpu_firmware_header { struct sdma_firmware_header_v1_1 sdma_v1_1; struct gpu_info_firmware_header_v1_0 gpu_info; struct dmcu_firmware_header_v1_0 dmcu; + struct dmcub_firmware_header_v1_0 dmcub; uint8_t raw[0x100]; }; @@ -307,6 +315,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_DMCU_INTV, AMDGPU_UCODE_ID_VCN0_RAM, AMDGPU_UCODE_ID_VCN1_RAM, + AMDGPU_UCODE_ID_DMCUB, AMDGPU_UCODE_ID_MAXIMUM, }; -- cgit From 2bd2a27ffc7cc43216a35ad516c5b96156c52fb8 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 22 Oct 2019 13:24:00 -0400 Subject: drm/amdgpu: Add PSP loading support for DMCUB ucode DMCUB ucode requires secure loading through PSP. This is already supported in PSP as GFX_FW_TYPE_DMUB, it just needs to be mapped from AMDGPU_UCODE_ID_DMCUB to GFX_FW_TYPE_DMUB. DMUB is a shorthand name for DMCUB and can be used interchangeably. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 1f044c8cf239..b1662af0f0cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1305,6 +1305,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_VCN1_RAM: *type = GFX_FW_TYPE_VCN1_RAM; break; + case AMDGPU_UCODE_ID_DMCUB: + *type = GFX_FW_TYPE_DMUB; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; -- cgit From 9e0880d9e28efc95c3731dcf6aa636fff605a7fc Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 25 Oct 2019 12:46:52 -0400 Subject: drm/amd/display: Drop DMCUB from DCN21 resources The interface to the DMCUB won't be through DC itself. DC will instead call into the DMUB interface introduced with a future change. The CONFIG_DRM_AMD_DC_DMUB defines will still be used for now but will be dropped at the end of the series. Since this define was never configurable in the first place this code wasn't used. 
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 31 ---------------------- 1 file changed, 31 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 459bd9a5caed..1042197f1859 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -373,20 +373,6 @@ static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN20(_MASK) }; -#ifdef CONFIG_DRM_AMD_DC_DMUB -static const struct dcn21_dmcub_registers dmcub_regs = { - DMCUB_REG_LIST_DCN() -}; - -static const struct dcn21_dmcub_shift dmcub_shift = { - DMCUB_COMMON_MASK_SH_LIST_BASE(__SHIFT) -}; - -static const struct dcn21_dmcub_mask dmcub_mask = { - DMCUB_COMMON_MASK_SH_LIST_BASE(_MASK) -}; -#endif - #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ @@ -970,11 +956,6 @@ static void destruct(struct dcn21_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); -#ifdef CONFIG_DRM_AMD_DC_DMUB - if (pool->base.dmcub != NULL) - dcn21_dmcub_destroy(&pool->base.dmcub); -#endif - if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); @@ -1766,18 +1747,6 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DMUB - pool->base.dmcub = dcn21_dmcub_create(ctx, - &dmcub_regs, - &dmcub_shift, - &dmcub_mask); - if (pool->base.dmcub == NULL) { - dm_error("DC: failed to create dmcub!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } -#endif - pool->base.pp_smu = dcn21_pp_smu_create(ctx); num_pipes = dcn2_1_ip.max_num_dpp; -- cgit From 7c008829cdc13012ce705ebd46c81a7ca5aeff8b Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 25 Oct 2019 11:28:35 -0400 Subject: drm/amd/display: Add the DMUB service The DMUB service is the interface to the DMCUB. It's required to support Renoir features so it will be enabled and compiled automatically when the Renoir display engine is enabled via CONFIG_DRM_AMD_DC_DCN2_1. DMUB code will initially be guarded by CONFIG_DRM_AMD_DC_DMUB and later switched to CONFIG_DRM_AMD_DC_DCN2_1 with the config option dropped. 
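Editorial note, not part of the patch: below is a minimal, hedged sketch of the driver-side bring-up sequence that the dmub_srv interface added by this change describes (create, query hw support, calculate regions, hw init). The register callbacks driver_reg_read()/driver_reg_write() and the framebuffer allocation step are driver-specific placeholders assumed here, not code from this series.

#include "dmub_srv.h"

/* Hypothetical driver hooks; a real driver supplies its own register access. */
static uint32_t driver_reg_read(void *ctx, uint32_t address);
static void driver_reg_write(void *ctx, uint32_t address, uint32_t value);

static enum dmub_status driver_dmub_bringup(struct dmub_srv *dmub, void *ctx)
{
	struct dmub_srv_create_params create_params = {
		.funcs.reg_read = driver_reg_read,
		.funcs.reg_write = driver_reg_write,
		.user_ctx = ctx,
		.asic = DMUB_ASIC_DCN21,
	};
	struct dmub_srv_region_params region_params = { 0 };
	struct dmub_srv_region_info region_info;
	struct dmub_srv_hw_params hw_params = { 0 };
	bool has_hw = false;
	enum dmub_status status;

	/* Step 1: create the software state for the service. */
	status = dmub_srv_create(dmub, &create_params);
	if (status != DMUB_STATUS_OK)
		return status;

	/* Step 2: check that the ASIC actually exposes a DMCUB. */
	status = dmub_srv_has_hw_support(dmub, &has_hw);
	if (status != DMUB_STATUS_OK)
		goto fail;
	if (!has_hw) {
		status = DMUB_STATUS_INVALID; /* no DMCUB on this ASIC */
		goto fail;
	}

	/*
	 * Step 3: compute the cache window regions. In a real driver
	 * region_params is filled from the firmware header sizes.
	 */
	status = dmub_srv_calc_region_info(dmub, &region_params, &region_info);
	if (status != DMUB_STATUS_OK)
		goto fail;

	/*
	 * The caller would now allocate a framebuffer of region_info.fb_size,
	 * run dmub_srv_calc_fb_info() and fill hw_params.fb[] - omitted here.
	 */

	/* Step 4: reset the DMCUB, backdoor-load the cache windows, set up the mailbox. */
	status = dmub_srv_hw_init(dmub, &hw_params);
	if (status != DMUB_STATUS_OK)
		goto fail;

	return DMUB_STATUS_OK;

fail:
	dmub_srv_destroy(dmub);
	return status;
}

After hw init, commands would be queued with dmub_srv_cmd_queue() and kicked off with dmub_srv_cmd_execute(); dmub_srv_wait_for_idle() can be used to drain the ring when it reports DMUB_STATUS_QUEUE_FULL.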
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 6 + drivers/gpu/drm/amd/display/Makefile | 8 + drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 256 +++++++++++ drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h | 129 ++++++ drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 505 +++++++++++++++++++++ .../drm/amd/display/dmub/inc/dmub_trace_buffer.h | 51 +++ drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h | 64 +++ drivers/gpu/drm/amd/display/dmub/src/Makefile | 29 ++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 137 ++++++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 62 +++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 126 +++++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h | 45 ++ drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c | 109 +++++ drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h | 120 +++++ drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 415 +++++++++++++++++ 15 files changed, 2062 insertions(+) create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h create mode 100644 drivers/gpu/drm/amd/display/dmub/src/Makefile create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 313183b80032..fced39e229d5 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -30,6 +30,7 @@ config DRM_AMD_DC_DCN2_1 bool "DCN 2.1 family" depends on DRM_AMD_DC && X86 depends on DRM_AMD_DC_DCN2_0 + select DRM_AMD_DC_DMUB help Choose this option if you want to have Renoir support for display engine @@ -52,6 +53,11 @@ config DRM_AMD_DC_HDCP if you want to support HDCP authentication +config DRM_AMD_DC_DMUB + def_bool n + help + DMUB support for display engine + config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" depends on DRM_AMD_DC diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 36b3d6a5d04d..3c7332be4a89 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -38,6 +38,10 @@ ifdef CONFIG_DRM_AMD_DC_HDCP subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp endif +ifdef CONFIG_DRM_AMD_DC_DMUB +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc +endif + #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 @@ -47,6 +51,10 @@ ifdef CONFIG_DRM_AMD_DC_HDCP DAL_LIBS += modules/hdcp endif +ifdef CONFIG_DRM_AMD_DC_DMUB +DAL_LIBS += dmub/src +endif + AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) include $(AMD_DAL) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h new file mode 100644 index 
000000000000..b25f92e3280d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -0,0 +1,256 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_CMD_H_ +#define _DMUB_CMD_H_ + +#include "dmub_types.h" +#include "atomfirmware.h" + +#define DMUB_RB_CMD_SIZE 64 +#define DMUB_RB_MAX_ENTRY 128 +#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY) +#define REG_SET_MASK 0xFFFF + +enum dmub_cmd_type { + DMUB_CMD__NULL, + DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE, + DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ, + DMUB_CMD__REG_SEQ_BURST_WRITE, + DMUB_CMD__REG_REG_WAIT, + DMUB_CMD__DIGX_ENCODER_CONTROL, + DMUB_CMD__SET_PIXEL_CLOCK, + DMUB_CMD__ENABLE_DISP_POWER_GATING, + DMUB_CMD__DPPHY_INIT, + DMUB_CMD__DIG1_TRANSMITTER_CONTROL, + + // PSR + DMUB_CMD__PSR_ENABLE, + DMUB_CMD__PSR_DISABLE, + DMUB_CMD__PSR_COPY_SETTINGS, + DMUB_CMD__PSR_SET_LEVEL, +}; + +#pragma pack(push, 1) + +struct dmub_cmd_header { + enum dmub_cmd_type type : 8; + unsigned int reserved0 : 16; + unsigned int payload_bytes : 6; /* up to 60 bytes */ + unsigned int reserved : 2; +}; + +/* + * Read modify write + * + * 60 payload bytes can hold up to 5 sets of read modify writes, + * each take 3 dwords. + * + * number of sequences = header.payload_bytes / sizeof(struct dmub_cmd_read_modify_write_sequence) + * + * modify_mask = 0xffff'ffff means all fields are going to be updated. in this case + * command parser will skip the read and we can use modify_mask = 0xffff'ffff as reg write + */ +struct dmub_cmd_read_modify_write_sequence { + uint32_t addr; + uint32_t modify_mask; + uint32_t modify_value; +}; + +#define DMUB_READ_MODIFY_WRITE_SEQ__MAX 5 +struct dmub_rb_cmd_read_modify_write { + struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE + struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX]; +}; + +/* + * Update a register with specified masks and values sequeunce + * + * 60 payload bytes can hold address + up to 7 sets of mask/value combo, each take 2 dword + * + * number of field update sequence = (header.payload_bytes - sizeof(addr)) / sizeof(struct read_modify_write_sequence) + * + * + * USE CASE: + * 1. auto-increment register where additional read would update pointer and produce wrong result + * 2. 
toggle a bit without read in the middle + */ + +struct dmub_cmd_reg_field_update_sequence { + uint32_t modify_mask; // 0xffff'ffff to skip initial read + uint32_t modify_value; +}; + +#define DMUB_REG_FIELD_UPDATE_SEQ__MAX 7 + +struct dmub_rb_cmd_reg_field_update_sequence { + struct dmub_cmd_header header; + uint32_t addr; + struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX]; +}; + + +/* + * Burst write + * + * support use case such as writing out LUTs. + * + * 60 payload bytes can hold up to 14 values to write to given address + * + * number of payload = header.payload_bytes / sizeof(struct read_modify_write_sequence) + */ +#define DMUB_BURST_WRITE_VALUES__MAX 14 +struct dmub_rb_cmd_burst_write { + struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_BURST_WRITE + uint32_t addr; + uint32_t write_values[DMUB_BURST_WRITE_VALUES__MAX]; +}; + + +struct dmub_rb_cmd_common { + struct dmub_cmd_header header; + uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)]; +}; + +struct dmub_cmd_reg_wait_data { + uint32_t addr; + uint32_t mask; + uint32_t condition_field_value; + uint32_t time_out_us; +}; + +struct dmub_rb_cmd_reg_wait { + struct dmub_cmd_header header; + struct dmub_cmd_reg_wait_data reg_wait; +}; + +struct dmub_cmd_digx_encoder_control_data { + union dig_encoder_control_parameters_v1_5 dig; +}; + +struct dmub_rb_cmd_digx_encoder_control { + struct dmub_cmd_header header; + struct dmub_cmd_digx_encoder_control_data encoder_control; +}; + +struct dmub_cmd_set_pixel_clock_data { + struct set_pixel_clock_parameter_v1_7 clk; +}; + +struct dmub_rb_cmd_set_pixel_clock { + struct dmub_cmd_header header; + struct dmub_cmd_set_pixel_clock_data pixel_clock; +}; + +struct dmub_cmd_enable_disp_power_gating_data { + struct enable_disp_power_gating_parameters_v2_1 pwr; +}; + +struct dmub_rb_cmd_enable_disp_power_gating { + struct dmub_cmd_header header; + struct dmub_cmd_enable_disp_power_gating_data power_gating; +}; + +struct dmub_cmd_dig1_transmitter_control_data { + struct dig_transmitter_control_parameters_v1_6 dig; +}; + +struct dmub_rb_cmd_dig1_transmitter_control { + struct dmub_cmd_header header; + struct dmub_cmd_dig1_transmitter_control_data transmitter_control; +}; + +struct dmub_rb_cmd_dpphy_init { + struct dmub_cmd_header header; + uint8_t reserved[60]; +}; + +struct dmub_cmd_psr_copy_settings_data { + uint32_t reg1; + uint32_t reg2; + uint32_t reg3; +}; + +struct dmub_rb_cmd_psr_copy_settings { + struct dmub_cmd_header header; + struct dmub_cmd_psr_copy_settings_data psr_copy_settings_data; +}; + +struct dmub_cmd_psr_set_level_data { + uint16_t psr_level; +}; + +struct dmub_rb_cmd_psr_set_level { + struct dmub_cmd_header header; + struct dmub_cmd_psr_set_level_data psr_set_level_data; +}; + +struct dmub_rb_cmd_psr_disable { + struct dmub_cmd_header header; +}; + +struct dmub_rb_cmd_psr_enable { + struct dmub_cmd_header header; +}; + +struct dmub_cmd_psr_notify_vblank_data { + uint32_t vblank_int; // Which vblank interrupt was triggered +}; + +struct dmub_rb_cmd_notify_vblank { + struct dmub_cmd_header header; + struct dmub_cmd_psr_notify_vblank_data psr_notify_vblank_data; +}; + +struct dmub_cmd_psr_notify_static_state_data { + uint32_t ss_int; // Which static screen interrupt was triggered + uint32_t ss_enter; // Enter (1) or exit (0) static screen +}; + +struct dmub_rb_cmd_psr_notify_static_state { + struct dmub_cmd_header header; + struct dmub_cmd_psr_notify_static_state_data psr_notify_static_state_data; +}; + +union dmub_rb_cmd { + 
struct dmub_rb_cmd_read_modify_write read_modify_write; + struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq; + struct dmub_rb_cmd_burst_write burst_write; + struct dmub_rb_cmd_reg_wait reg_wait; + struct dmub_rb_cmd_common cmd_common; + struct dmub_rb_cmd_digx_encoder_control digx_encoder_control; + struct dmub_rb_cmd_set_pixel_clock set_pixel_clock; + struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating; + struct dmub_rb_cmd_dpphy_init dpphy_init; + struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control; + struct dmub_rb_cmd_psr_enable psr_enable; + struct dmub_rb_cmd_psr_disable psr_disable; + struct dmub_rb_cmd_psr_copy_settings psr_copy_settings; + struct dmub_rb_cmd_psr_set_level psr_set_level; +}; + +#pragma pack(pop) + +#endif /* _DMUB_CMD_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h new file mode 100644 index 000000000000..ac22744eaa94 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h @@ -0,0 +1,129 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DMUB_RB_H_ +#define _DMUB_RB_H_ + +#include "dmub_types.h" +#include "dmub_cmd.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +struct dmub_cmd_header; + +struct dmub_rb_init_params { + void *ctx; + void *base_address; + uint32_t capacity; +}; + +struct dmub_rb { + void *base_address; + uint32_t data_count; + uint32_t rptr; + uint32_t wrpt; + uint32_t capacity; + + void *ctx; + void *dmub; +}; + + +static inline bool dmub_rb_empty(struct dmub_rb *rb) +{ + return (rb->wrpt == rb->rptr); +} + +static inline bool dmub_rb_full(struct dmub_rb *rb) +{ + uint32_t data_count; + + if (rb->wrpt >= rb->rptr) + data_count = rb->wrpt - rb->rptr; + else + data_count = rb->capacity - (rb->rptr - rb->wrpt); + + return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE)); +} + +static inline bool dmub_rb_push_front(struct dmub_rb *rb, + const struct dmub_cmd_header *cmd) +{ + uint8_t *wt_ptr = (uint8_t *)(rb->base_address) + rb->wrpt; + + if (dmub_rb_full(rb)) + return false; + + dmub_memcpy(wt_ptr, cmd, DMUB_RB_CMD_SIZE); + rb->wrpt += DMUB_RB_CMD_SIZE; + + if (rb->wrpt >= rb->capacity) + rb->wrpt %= rb->capacity; + + return true; +} + +static inline bool dmub_rb_front(struct dmub_rb *rb, + struct dmub_cmd_header *cmd) +{ + uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr; + + if (dmub_rb_empty(rb)) + return false; + + dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE); + + return true; +} + +static inline bool dmub_rb_pop_front(struct dmub_rb *rb) +{ + if (dmub_rb_empty(rb)) + return false; + + rb->rptr += DMUB_RB_CMD_SIZE; + + if (rb->rptr >= rb->capacity) + rb->rptr %= rb->capacity; + + return true; +} + +static inline void dmub_rb_init(struct dmub_rb *rb, + struct dmub_rb_init_params *init_params) +{ + rb->base_address = init_params->base_address; + rb->capacity = init_params->capacity; + rb->rptr = 0; + rb->wrpt = 0; +} + +#if defined(__cplusplus) +} +#endif + +#endif /* _DMUB_RB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h new file mode 100644 index 000000000000..aa8f0396616d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -0,0 +1,505 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DMUB_SRV_H_ +#define _DMUB_SRV_H_ + +/** + * DOC: DMUB interface and operation + * + * DMUB is the interface to the display DMCUB microcontroller on DCN hardware. + * It delegates hardware initialization and command submission to the + * microcontroller. DMUB is the shortname for DMCUB. + * + * This interface is not thread-safe. Ensure that all access to the interface + * is properly synchronized by the caller. + * + * Initialization and usage of the DMUB service should be done in the + * steps given below: + * + * 1. dmub_srv_create() + * 2. dmub_srv_has_hw_support() + * 3. dmub_srv_calc_region_info() + * 4. dmub_srv_hw_init() + * + * The call to dmub_srv_create() is required to use the server. + * + * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info() + * are helpers to query cache window size and allocate framebuffer(s) + * for the cache windows. + * + * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare + * for command submission. Commands can be queued via dmub_srv_cmd_queue() + * and executed via dmub_srv_cmd_execute(). + * + * If the queue is full the dmub_srv_wait_for_idle() call can be used to + * wait until the queue has been cleared. + * + * Destroying the DMUB service can be done by calling dmub_srv_destroy(). + * This does not clear DMUB hardware state, only software state. + * + * The interface is intended to be standalone and should not depend on any + * other component within DAL. + */ + +#include "dmub_types.h" +#include "dmub_cmd.h" +#include "dmub_rb.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Forward declarations */ +struct dmub_srv; +struct dmub_cmd_header; +struct dmcu; + +/* enum dmub_status - return code for dmcub functions */ +enum dmub_status { + DMUB_STATUS_OK = 0, + DMUB_STATUS_NO_CTX, + DMUB_STATUS_QUEUE_FULL, + DMUB_STATUS_TIMEOUT, + DMUB_STATUS_INVALID, +}; + +/* enum dmub_asic - dmub asic identifier */ +enum dmub_asic { + DMUB_ASIC_NONE = 0, + DMUB_ASIC_DCN20, + DMUB_ASIC_DCN21, + DMUB_ASIC_MAX, +}; + +/* enum dmub_window_id - dmub window identifier */ +enum dmub_window_id { + DMUB_WINDOW_0_INST_CONST = 0, + DMUB_WINDOW_1_STACK, + DMUB_WINDOW_2_BSS_DATA, + DMUB_WINDOW_3_VBIOS, + DMUB_WINDOW_4_MAILBOX, + DMUB_WINDOW_5_TRACEBUFF, + DMUB_WINDOW_6_RESERVED, + DMUB_WINDOW_7_RESERVED, + DMUB_WINDOW_TOTAL, +}; + +/** + * struct dmub_region - dmub hw memory region + * @base: base address for region, must be 256 byte aligned + * @top: top address for region + */ +struct dmub_region { + uint32_t base; + uint32_t top; +}; + +/** + * struct dmub_window - dmub hw cache window + * @off: offset to the fb memory in gpu address space + * @r: region in uc address space for cache window + */ +struct dmub_window { + union dmub_addr offset; + struct dmub_region region; +}; + +/** + * struct dmub_fb - defines a dmub framebuffer memory region + * @cpu_addr: cpu virtual address for the region, NULL if invalid + * @gpu_addr: gpu virtual address for the region, NULL if invalid + * @size: size of the region in bytes, zero if invalid + */ +struct dmub_fb { + void *cpu_addr; + uint64_t gpu_addr; + uint32_t size; +}; + +/** + * struct dmub_srv_region_params - params used for calculating dmub regions + * @inst_const_size: size of the fw inst const section + * @bss_data_size: size of the fw bss data section + * @vbios_size: size of the vbios data + */ +struct dmub_srv_region_params { + uint32_t inst_const_size; + uint32_t bss_data_size; + uint32_t vbios_size; +}; + +/** + * struct 
dmub_srv_region_info - output region info from the dmub service + * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes + * @num_regions: number of regions used by the dmub service + * @regions: region info + * + * The regions are aligned such that they can be all placed within the + * same framebuffer but they can also be placed into different framebuffers. + * + * The size of each region can be calculated by the caller: + * size = reg.top - reg.base + * + * Care must be taken when performing custom allocations to ensure that each + * region base address is 256 byte aligned. + */ +struct dmub_srv_region_info { + uint32_t fb_size; + uint8_t num_regions; + struct dmub_region regions[DMUB_WINDOW_TOTAL]; +}; + +/** + * struct dmub_srv_fb_params - parameters used for driver fb setup + * @region_info: region info calculated by dmub service + * @cpu_addr: base cpu address for the framebuffer + * @gpu_addr: base gpu virtual address for the framebuffer + */ +struct dmub_srv_fb_params { + const struct dmub_srv_region_info *region_info; + void *cpu_addr; + uint64_t gpu_addr; +}; + +/** + * struct dmub_srv_fb_info - output fb info from the dmub service + * @num_fbs: number of required dmub framebuffers + * @fbs: fb data for each region + * + * Output from the dmub service helper that can be used by the + * driver to prepare dmub_fb that can be passed into the dmub + * hw init service. + * + * Assumes that all regions are within the same framebuffer + * and have been setup according to the region_info generated + * by the dmub service. + */ +struct dmub_srv_fb_info { + uint8_t num_fb; + struct dmub_fb fb[DMUB_WINDOW_TOTAL]; +}; + +/** + * struct dmub_srv_base_funcs - Driver specific base callbacks + */ +struct dmub_srv_base_funcs { + /** + * @reg_read: + * + * Hook for reading a register. + * + * Return: The 32-bit register value from the given address. + */ + uint32_t (*reg_read)(void *ctx, uint32_t address); + + /** + * @reg_write: + * + * Hook for writing a value to the register specified by address. 
+ */ + void (*reg_write)(void *ctx, uint32_t address, uint32_t value); +}; + +/** + * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub + */ +struct dmub_srv_hw_funcs { + /* private: internal use only */ + + void (*reset)(struct dmub_srv *dmub); + + void (*reset_release)(struct dmub_srv *dmub); + + void (*backdoor_load)(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1); + + void (*setup_windows)(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5); + + void (*setup_mailbox)(struct dmub_srv *dmub, + const struct dmub_region *inbox1); + + uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub); + + void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset); + + bool (*is_supported)(struct dmub_srv *dmub); + + bool (*is_phy_init)(struct dmub_srv *dmub); + + bool (*is_auto_load_done)(struct dmub_srv *dmub); +}; + +/** + * struct dmub_srv_create_params - params for dmub service creation + * @base_funcs: driver supplied base routines + * @hw_funcs: optional overrides for hw funcs + * @user_ctx: context data for callback funcs + * @asic: driver supplied asic + * @is_virtual: false for hw support only + */ +struct dmub_srv_create_params { + struct dmub_srv_base_funcs funcs; + struct dmub_srv_hw_funcs *hw_funcs; + void *user_ctx; + enum dmub_asic asic; + bool is_virtual; +}; + +/* + * struct dmub_srv_hw_params - params for dmub hardware initialization + * @fb: framebuffer info for each region + * @fb_base: base of the framebuffer aperture + * @fb_offset: offset of the framebuffer aperture + * @psp_version: psp version to pass for DMCU init + */ +struct dmub_srv_hw_params { + struct dmub_fb *fb[DMUB_WINDOW_TOTAL]; + uint64_t fb_base; + uint64_t fb_offset; + uint32_t psp_version; +}; + +/** + * struct dmub_srv - software state for dmcub + * @asic: dmub asic identifier + * @user_ctx: user provided context for the dmub_srv + * @is_virtual: false if hardware support only + */ +struct dmub_srv { + enum dmub_asic asic; + void *user_ctx; + bool is_virtual; + + /* private: internal use only */ + struct dmub_srv_base_funcs funcs; + struct dmub_srv_hw_funcs hw_funcs; + struct dmub_rb inbox1_rb; + + bool sw_init; + bool hw_init; + + uint64_t fb_base; + uint64_t fb_offset; + uint32_t psp_version; +}; + +/** + * dmub_srv_create() - creates the DMUB service. + * @dmub: the dmub service + * @params: creation parameters for the service + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_create(struct dmub_srv *dmub, + const struct dmub_srv_create_params *params); + +/** + * dmub_srv_destroy() - destroys the DMUB service. + * @dmub: the dmub service + */ +void dmub_srv_destroy(struct dmub_srv *dmub); + +/** + * dmub_srv_calc_region_info() - retreives region info from the dmub service + * @dmub: the dmub service + * @params: parameters used to calculate region locations + * @info_out: the output region info from dmub + * + * Calculates the base and top address for all relevant dmub regions + * using the parameters given (if any). 
+ * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status +dmub_srv_calc_region_info(struct dmub_srv *dmub, + const struct dmub_srv_region_params *params, + struct dmub_srv_region_info *out); + +/** + * dmub_srv_calc_region_info() - retreives fb info from the dmub service + * @dmub: the dmub service + * @params: parameters used to calculate fb locations + * @info_out: the output fb info from dmub + * + * Calculates the base and top address for all relevant dmub regions + * using the parameters given (if any). + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, + const struct dmub_srv_fb_params *params, + struct dmub_srv_fb_info *out); + +/** + * dmub_srv_has_hw_support() - returns hw support state for dmcub + * @dmub: the dmub service + * @is_supported: hw support state + * + * Queries the hardware for DMCUB support and returns the result. + * + * Can be called before dmub_srv_hw_init(). + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, + bool *is_supported); + +/** + * dmub_srv_hw_init() - initializes the underlying DMUB hardware + * @dmub: the dmub service + * @params: params for hardware initialization + * + * Resets the DMUB hardware and performs backdoor loading of the + * required cache regions based on the input framebuffer regions. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_NO_CTX - dmcub context not initialized + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, + const struct dmub_srv_hw_params *params); + +/** + * dmub_srv_cmd_queue() - queues a command to the DMUB + * @dmub: the dmub service + * @cmd: the command to queue + * + * Queues a command to the DMUB service but does not begin execution + * immediately. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_QUEUE_FULL - no remaining room in queue + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, + const struct dmub_cmd_header *cmd); + +/** + * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub + * @dmub: the dmub service + * + * Begins exeuction of queued commands on the dmub. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub); + +/** + * dmub_srv_cmd_submit() - submits a command to the DMUB immediately + * @dmub: the dmub service + * @cmd: the command to submit + * @timeout_us: the maximum number of microseconds to wait + * + * Submits a command to the DMUB with an optional timeout. + * If timeout_us is given then the service will attempt to + * resubmit for the given number of microseconds. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for submit timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub, + const struct dmub_cmd_header *cmd, + uint32_t timeout_us); + +/** + * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Waits until firmware has been autoloaded by the DMCUB. The maximum + * wait time is given in microseconds to prevent spinning forever. 
+ * + * On ASICs without firmware autoload support this function will return + * immediately. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for phy init timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, + uint32_t timeout_us); + +/** + * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Waits until the PHY has been initialized by the DMUB. The maximum + * wait time is given in microseconds to prevent spinning forever. + * + * On ASICs without PHY init support this function will return + * immediately. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for phy init timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, + uint32_t timeout_us); + +/** + * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Waits until the DMUB buffer is empty and all commands have + * finished processing. The maximum wait time is given in + * microseconds to prevent spinning forever. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, + uint32_t timeout_us); + +#if defined(__cplusplus) +} +#endif + +#endif /* _DMUB_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h new file mode 100644 index 000000000000..9707706ba8ce --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef _DMUB_TRACE_BUFFER_H_ +#define _DMUB_TRACE_BUFFER_H_ + +#include "dmub_types.h" + +#define LOAD_DMCU_FW 1 +#define LOAD_PHY_FW 2 + +struct dmcub_trace_buf_entry { + uint32_t trace_code; + uint32_t tick_count; + uint32_t param0; + uint32_t param1; +}; + +#define TRACE_BUF_SIZE (1024) //1 kB +#define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry)) + +struct dmcub_trace_buf { + uint32_t entry_count; + uint32_t clk_freq; + struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY]; +}; + + + +#endif /* _DMUB_TRACE_BUFFER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h new file mode 100644 index 000000000000..41d524b0db2f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_TYPES_H_ +#define _DMUB_TYPES_H_ + +/* Basic type definitions. */ +#include +#include +#include +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +#ifndef dmub_memcpy +#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes)) +#endif + +#ifndef dmub_memset +#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes)) +#endif + +#ifndef dmub_udelay +#define dmub_udelay(microseconds) udelay(microseconds) +#endif + +union dmub_addr { + struct { + uint32_t low_part; + uint32_t high_part; + } u; + uint64_t quad_part; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _DMUB_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile new file mode 100644 index 000000000000..f3b844f474fd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile @@ -0,0 +1,29 @@ +# +# Copyright 2019 Advanced Micro Devices, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# + +ifdef CONFIG_DRM_AMD_DC_DMUB +DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o + +AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DMUB) +endif diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c new file mode 100644 index 000000000000..236a4156bbe1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -0,0 +1,137 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_reg.h" + +#include "dcn/dcn_2_0_0_offset.h" +#include "dcn/dcn_2_0_0_sh_mask.h" +#include "soc15_hw_ip.h" +#include "vega10_ip_offset.h" + +#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg +#define CTX dmub + +void dmub_dcn20_reset(struct dmub_srv *dmub) +{ + REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); +} + +void dmub_dcn20_reset_release(struct dmub_srv *dmub) +{ + REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF); + REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0); +} + +void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, struct dmub_window *cw0, + struct dmub_window *cw1) +{ + REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x4, + DMCUB_MEM_WRITE_SPACE, 0x4); + + REG_WRITE(DMCUB_REGION3_CW0_OFFSET, cw0->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, cw0->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); + REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, + DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, + DMCUB_REGION3_CW0_ENABLE, 1); + + REG_WRITE(DMCUB_REGION3_CW1_OFFSET, cw1->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, cw1->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); + REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, + DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, + DMCUB_REGION3_CW1_ENABLE, 1); + + REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, + 0x20); +} + +void dmub_dcn20_setup_windows(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5) +{ + REG_WRITE(DMCUB_REGION3_CW2_OFFSET, cw2->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, cw2->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); + REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, + DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, + DMCUB_REGION3_CW2_ENABLE, 1); + + REG_WRITE(DMCUB_REGION3_CW3_OFFSET, cw3->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, cw3->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); + REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, + DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, + DMCUB_REGION3_CW3_ENABLE, 1); + + /* TODO: Move this to CW4. */ + + REG_WRITE(DMCUB_REGION4_OFFSET, cw4->offset.u.low_part); + REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, cw4->offset.u.high_part); + REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, + cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, + 1); +} + +void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, + const struct dmub_region *inbox1) +{ + /* TODO: Use CW4 instead of region 4. 
*/ + + REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); + REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); + REG_WRITE(DMCUB_INBOX1_WPTR, 0); +} + +uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_RPTR); +} + +void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset) +{ + REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset); +} + +bool dmub_dcn20_is_supported(struct dmub_srv *dmub) +{ + uint32_t supported = 0; + + REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported); + + return supported; +} + +bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_SCRATCH10) != 0; +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h new file mode 100644 index 000000000000..41269da40363 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -0,0 +1,62 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DCN20_H_ +#define _DMUB_DCN20_H_ + +#include "../inc/dmub_types.h" + +struct dmub_srv; + +/* Hardware functions. */ + +void dmub_dcn20_init(struct dmub_srv *dmub); + +void dmub_dcn20_reset(struct dmub_srv *dmub); + +void dmub_dcn20_reset_release(struct dmub_srv *dmub); + +void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1); + +void dmub_dcn20_setup_windows(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5); + +void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, + const struct dmub_region *inbox1); + +uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub); + +void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); + +bool dmub_dcn20_is_supported(struct dmub_srv *dmub); + +bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub); + +#endif /* _DMUB_DCN20_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c new file mode 100644 index 000000000000..d40a808112e7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -0,0 +1,126 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_reg.h" + +#include "dcn/dcn_2_1_0_offset.h" +#include "dcn/dcn_2_1_0_sh_mask.h" +#include "renoir_ip_offset.h" + +#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg +#define CTX dmub + +static inline void dmub_dcn21_translate_addr(const union dmub_addr *addr_in, + uint64_t fb_base, + uint64_t fb_offset, + union dmub_addr *addr_out) +{ + addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; +} + +void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1) +{ + union dmub_addr offset; + uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + + REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, + DMCUB_MEM_WRITE_SPACE, 0x3); + + dmub_dcn21_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); + REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, + DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, + DMCUB_REGION3_CW0_ENABLE, 1); + + dmub_dcn21_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); + REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, + DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, + DMCUB_REGION3_CW1_ENABLE, 1); + + REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, + 0x20); +} + +void dmub_dcn21_setup_windows(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5) +{ + union dmub_addr offset; + uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + + dmub_dcn21_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); + REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, + DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, + DMCUB_REGION3_CW2_ENABLE, 1); + + dmub_dcn21_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); + + 
REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); + REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, + DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, + DMCUB_REGION3_CW3_ENABLE, 1); + + /* TODO: Move this to CW4. */ + dmub_dcn21_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); + REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, + cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, + 1); + + dmub_dcn21_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); + REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, + DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, + DMCUB_REGION3_CW5_ENABLE, 1); +} + +bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub) +{ + return (REG_READ(DMCUB_SCRATCH0) == 3); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h new file mode 100644 index 000000000000..f57969d8d56f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -0,0 +1,45 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DCN21_H_ +#define _DMUB_DCN21_H_ + +#include "dmub_dcn20.h" + +/* Hardware functions. */ + +void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1); + +void dmub_dcn21_setup_windows(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5); + +bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub); + +#endif /* _DMUB_DCN21_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c new file mode 100644 index 000000000000..4094eca212f0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c @@ -0,0 +1,109 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub_reg.h" +#include "../inc/dmub_srv.h" + +struct dmub_reg_value_masks { + uint32_t value; + uint32_t mask; +}; + +static inline void +set_reg_field_value_masks(struct dmub_reg_value_masks *field_value_mask, + uint32_t value, uint32_t mask, uint8_t shift) +{ + field_value_mask->value = + (field_value_mask->value & ~mask) | (mask & (value << shift)); + field_value_mask->mask = field_value_mask->mask | mask; +} + +static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask, + uint32_t addr, int n, uint8_t shift1, + uint32_t mask1, uint32_t field_value1, + va_list ap) +{ + uint32_t shift, mask, field_value; + int i = 1; + + /* gather all bits value/mask getting updated in this register */ + set_reg_field_value_masks(field_value_mask, field_value1, mask1, + shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t); + + set_reg_field_value_masks(field_value_mask, field_value, mask, + shift); + i++; + } +} + +static inline uint32_t get_reg_field_value_ex(uint32_t reg_value, uint32_t mask, + uint8_t shift) +{ + return (mask & reg_value) >> shift; +} + +void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1, + uint32_t mask1, uint32_t field_value1, ...) +{ + struct dmub_reg_value_masks field_value_mask = { 0 }; + uint32_t reg_val; + va_list ap; + + va_start(ap, field_value1); + set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, + field_value1, ap); + va_end(ap); + + reg_val = srv->funcs.reg_read(srv->user_ctx, addr); + reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + srv->funcs.reg_write(srv->user_ctx, addr, reg_val); +} + +void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) 
+{ + struct dmub_reg_value_masks field_value_mask = { 0 }; + va_list ap; + + va_start(ap, field_value1); + set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, + field_value1, ap); + va_end(ap); + + reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + srv->funcs.reg_write(srv->user_ctx, addr, reg_val); +} + +void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift, + uint32_t mask, uint32_t *field_value) +{ + uint32_t reg_val = srv->funcs.reg_read(srv->user_ctx, addr); + *field_value = get_reg_field_value_ex(reg_val, mask, shift); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h new file mode 100644 index 000000000000..bac4ee8f745f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h @@ -0,0 +1,120 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_REG_H_ +#define _DMUB_REG_H_ + +#include "../inc/dmub_types.h" + +struct dmub_srv; + +/* Register offset and field lookup. */ + +#define BASE(seg) BASE_INNER(seg) + +#define REG_OFFSET(base_index, addr) (BASE(base_index) + addr) + +#define REG(reg_name) REG_OFFSET(mm ## reg_name ## _BASE_IDX, mm ## reg_name) + +#define FD(reg_field) reg_field ## __SHIFT, reg_field ## _MASK + +#define FN(reg_name, field) FD(reg_name##__##field) + +/* Register reads and writes. */ + +#define REG_READ(reg) ((CTX)->funcs.reg_read((CTX)->user_ctx, REG(reg))) + +#define REG_WRITE(reg, val) \ + ((CTX)->funcs.reg_write((CTX)->user_ctx, REG(reg), (val))) + +/* Register field setting. */ + +#define REG_SET_N(reg_name, n, initial_val, ...) \ + dmub_reg_set(CTX, REG(reg_name), initial_val, n, __VA_ARGS__) + +#define REG_SET(reg_name, initial_val, field, val) \ + REG_SET_N(reg_name, 1, initial_val, \ + FN(reg_name, field), val) + +#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \ + REG_SET_N(reg, 2, init_value, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2) + +#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \ + REG_SET_N(reg, 3, init_value, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3) + +#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \ + REG_SET_N(reg, 4, init_value, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3, \ + FN(reg, f4), v4) + +/* Register field updating. 
*/ + +#define REG_UPDATE_N(reg_name, n, ...)\ + dmub_reg_update(CTX, REG(reg_name), n, __VA_ARGS__) + +#define REG_UPDATE(reg_name, field, val) \ + REG_UPDATE_N(reg_name, 1, \ + FN(reg_name, field), val) + +#define REG_UPDATE_2(reg, f1, v1, f2, v2) \ + REG_UPDATE_N(reg, 2,\ + FN(reg, f1), v1,\ + FN(reg, f2), v2) + +#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \ + REG_UPDATE_N(reg, 3, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3) + +#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \ + REG_UPDATE_N(reg, 4, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3, \ + FN(reg, f4), v4) + +/* Register field getting. */ + +#define REG_GET(reg_name, field, val) \ + dmub_reg_get(CTX, REG(reg_name), FN(reg_name, field), val) + +void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); + +void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1, + uint32_t mask1, uint32_t field_value1, ...); + +void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift, + uint32_t mask, uint32_t *field_value); + +#endif /* _DMUB_REG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c new file mode 100644 index 000000000000..229eab7277d1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -0,0 +1,415 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_dcn20.h" +#include "dmub_dcn21.h" +/* + * Note: the DMUB service is standalone. No additional headers should be + * added below or above this line unless they reside within the DMUB + * folder. + */ + +/* Alignment for framebuffer memory. */ +#define DMUB_FB_ALIGNMENT (1024 * 1024) + +/* Stack size. */ +#define DMUB_STACK_SIZE (128 * 1024) + +/* Context size. */ +#define DMUB_CONTEXT_SIZE (512 * 1024) + +/* Mailbox size */ +#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) + +/* Tracebuffer size */ +#define DMUB_TRACEBUFF_SIZE (1024) //1kB buffer + +/* Number of windows in use. */ +#define DMUB_NUM_WINDOWS (DMUB_WINDOW_5_TRACEBUFF + 1) +/* Base addresses. 
*/ + +#define DMUB_CW0_BASE (0x60000000) +#define DMUB_CW1_BASE (0x61000000) +#define DMUB_CW5_BASE (0x65000000) + +static inline uint32_t dmub_align(uint32_t val, uint32_t factor) +{ + return (val + factor - 1) / factor * factor; +} + +static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) +{ + struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; + + switch (asic) { + case DMUB_ASIC_DCN20: + case DMUB_ASIC_DCN21: + funcs->reset = dmub_dcn20_reset; + funcs->reset_release = dmub_dcn20_reset_release; + funcs->backdoor_load = dmub_dcn20_backdoor_load; + funcs->setup_windows = dmub_dcn20_setup_windows; + funcs->setup_mailbox = dmub_dcn20_setup_mailbox; + funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr; + funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; + funcs->is_supported = dmub_dcn20_is_supported; + funcs->is_phy_init = dmub_dcn20_is_phy_init; + + if (asic == DMUB_ASIC_DCN21) { + funcs->backdoor_load = dmub_dcn21_backdoor_load; + funcs->setup_windows = dmub_dcn21_setup_windows; + funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done; + } + break; + + default: + return false; + } + + return true; +} + +enum dmub_status dmub_srv_create(struct dmub_srv *dmub, + const struct dmub_srv_create_params *params) +{ + enum dmub_status status = DMUB_STATUS_OK; + + dmub_memset(dmub, 0, sizeof(*dmub)); + + dmub->funcs = params->funcs; + dmub->user_ctx = params->user_ctx; + dmub->asic = params->asic; + dmub->is_virtual = params->is_virtual; + + /* Setup asic dependent hardware funcs. */ + if (!dmub_srv_hw_setup(dmub, params->asic)) { + status = DMUB_STATUS_INVALID; + goto cleanup; + } + + /* Override (some) hardware funcs based on user params. */ + if (params->hw_funcs) { + if (params->hw_funcs->get_inbox1_rptr) + dmub->hw_funcs.get_inbox1_rptr = + params->hw_funcs->get_inbox1_rptr; + + if (params->hw_funcs->set_inbox1_wptr) + dmub->hw_funcs.set_inbox1_wptr = + params->hw_funcs->set_inbox1_wptr; + + if (params->hw_funcs->is_supported) + dmub->hw_funcs.is_supported = + params->hw_funcs->is_supported; + } + + /* Sanity checks for required hw func pointers. 
*/ + if (!dmub->hw_funcs.get_inbox1_rptr || + !dmub->hw_funcs.set_inbox1_wptr) { + status = DMUB_STATUS_INVALID; + goto cleanup; + } + +cleanup: + if (status == DMUB_STATUS_OK) + dmub->sw_init = true; + else + dmub_srv_destroy(dmub); + + return status; +} + +void dmub_srv_destroy(struct dmub_srv *dmub) +{ + dmub_memset(dmub, 0, sizeof(*dmub)); +} + +enum dmub_status +dmub_srv_calc_region_info(struct dmub_srv *dmub, + const struct dmub_srv_region_params *params, + struct dmub_srv_region_info *out) +{ + struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST]; + struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK]; + struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA]; + struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS]; + struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; + struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + memset(out, 0, sizeof(*out)); + + out->num_regions = DMUB_NUM_WINDOWS; + + inst->base = 0x0; + inst->top = inst->base + params->inst_const_size; + + data->base = dmub_align(inst->top, 256); + data->top = data->base + params->bss_data_size; + + stack->base = dmub_align(data->top, 256); + stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; + + bios->base = dmub_align(stack->top, 256); + bios->top = bios->base + params->vbios_size; + + mail->base = dmub_align(bios->top, 256); + mail->top = mail->base + DMUB_MAILBOX_SIZE; + + trace_buff->base = dmub_align(mail->top, 256); + trace_buff->top = trace_buff->base + DMUB_TRACEBUFF_SIZE; + + out->fb_size = dmub_align(trace_buff->top, 4096); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, + const struct dmub_srv_fb_params *params, + struct dmub_srv_fb_info *out) +{ + uint8_t *cpu_base; + uint64_t gpu_base; + uint32_t i; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + memset(out, 0, sizeof(*out)); + + if (params->region_info->num_regions != DMUB_NUM_WINDOWS) + return DMUB_STATUS_INVALID; + + cpu_base = (uint8_t *)params->cpu_addr; + gpu_base = params->gpu_addr; + + for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { + const struct dmub_region *reg = + ¶ms->region_info->regions[i]; + + out->fb[i].cpu_addr = cpu_base + reg->base; + out->fb[i].gpu_addr = gpu_base + reg->base; + out->fb[i].size = reg->top - reg->base; + } + + out->num_fb = DMUB_NUM_WINDOWS; + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, + bool *is_supported) +{ + *is_supported = false; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.is_supported) + *is_supported = dmub->hw_funcs.is_supported(dmub); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, + const struct dmub_srv_hw_params *params) +{ + struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST]; + struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK]; + struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA]; + struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS]; + struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; + struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; + + struct dmub_rb_init_params rb_params; + struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5; + struct dmub_region inbox1; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + dmub->fb_base = params->fb_base; + dmub->fb_offset = params->fb_offset; + dmub->psp_version = params->psp_version; 
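+	/*
+	 * Window layout programmed below (region sizing comes from
+	 * dmub_srv_calc_region_info): CW0 = firmware instruction/const data,
+	 * CW1 = stack + context, CW2 = bss/data, CW3 = VBIOS copy,
+	 * REGION4 = inbox1 mailbox, CW5 = trace buffer.
+	 */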
+ + if (inst_fb && data_fb) { + cw0.offset.quad_part = inst_fb->gpu_addr; + cw0.region.base = DMUB_CW0_BASE; + cw0.region.top = cw0.region.base + inst_fb->size - 1; + + cw1.offset.quad_part = stack_fb->gpu_addr; + cw1.region.base = DMUB_CW1_BASE; + cw1.region.top = cw1.region.base + stack_fb->size - 1; + + if (dmub->hw_funcs.backdoor_load) + dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1); + } + + if (dmub->hw_funcs.reset) + dmub->hw_funcs.reset(dmub); + + if (inst_fb && data_fb && bios_fb && mail_fb) { + cw2.offset.quad_part = data_fb->gpu_addr; + cw2.region.base = DMUB_CW0_BASE + inst_fb->size; + cw2.region.top = cw2.region.base + data_fb->size; + + cw3.offset.quad_part = bios_fb->gpu_addr; + cw3.region.base = DMUB_CW1_BASE + stack_fb->size; + cw3.region.top = cw3.region.base + bios_fb->size; + + cw4.offset.quad_part = mail_fb->gpu_addr; + cw4.region.base = cw3.region.top + 1; + cw4.region.top = cw4.region.base + mail_fb->size; + + inbox1.base = cw4.region.base; + inbox1.top = cw4.region.top; + + cw5.offset.quad_part = tracebuff_fb->gpu_addr; + cw5.region.base = DMUB_CW5_BASE; + cw5.region.top = cw5.region.base + tracebuff_fb->size; + + if (dmub->hw_funcs.setup_windows) + dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5); + + if (dmub->hw_funcs.setup_mailbox) + dmub->hw_funcs.setup_mailbox(dmub, &inbox1); + } + + if (mail_fb) { + dmub_memset(&rb_params, 0, sizeof(rb_params)); + rb_params.ctx = dmub; + rb_params.base_address = mail_fb->cpu_addr; + rb_params.capacity = DMUB_RB_SIZE; + + dmub_rb_init(&dmub->inbox1_rb, &rb_params); + } + + if (dmub->hw_funcs.reset_release) + dmub->hw_funcs.reset_release(dmub); + + dmub->hw_init = true; + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, + const struct dmub_cmd_header *cmd) +{ + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) + return DMUB_STATUS_OK; + + return DMUB_STATUS_QUEUE_FULL; +} + +enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) +{ + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub, + const struct dmub_cmd_header *cmd, + uint32_t timeout_us) +{ + uint32_t i = 0; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; ++i) { + dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) { + dmub->hw_funcs.set_inbox1_wptr(dmub, + dmub->inbox1_rb.wrpt); + return DMUB_STATUS_OK; + } + + udelay(1); + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i; + + if (!dmub->hw_init || !dmub->hw_funcs.is_auto_load_done) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; i += 100) { + if (dmub->hw_funcs.is_auto_load_done(dmub)) + return DMUB_STATUS_OK; + + udelay(100); + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i; + + if (!dmub->hw_init || !dmub->hw_funcs.is_phy_init) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; i += 10) { + if (dmub->hw_funcs.is_phy_init(dmub)) + return DMUB_STATUS_OK; + + udelay(10); + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i; + + if (!dmub->hw_init) + return 
DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; ++i) { + dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + if (dmub_rb_empty(&dmub->inbox1_rb)) + return DMUB_STATUS_OK; + + udelay(1); + } + + return DMUB_STATUS_TIMEOUT; +} -- cgit From a7e3658e9430c19bc7b87949fb826e49ec0465b4 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Sat, 12 Oct 2019 16:06:19 -0400 Subject: drm/amd/display: Change dmcu init sequence for dmcub loading dmcu FW. [Why] DMCU isn't intiliazed properly by dmcub loading due to dmcub initialize sequence. [How] Change dmcu init sequece to meet dmcub initilize. Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 79 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 13 ++++ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +- .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 4 +- drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 2 + 5 files changed, 97 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index ba995d3f2318..3417100d51e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -59,6 +59,12 @@ #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L +// PSP FW version +#define mmMP0_SMN_C2PMSG_58 0x1607A + +//Register access policy version +#define mmMP0_SMN_C2PMSG_91 0x1609B + static bool dce_dmcu_init(struct dmcu *dmcu) { // Do nothing @@ -373,6 +379,7 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) const struct dc_config *config = &dmcu->ctx->dc->config; bool status = false; + PERF_TRACE(); /* Definition of DC_DMCU_SCRATCH * 0 : firmare not loaded * 1 : PSP load DMCU FW but not initialized @@ -429,9 +436,23 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) break; } + PERF_TRACE(); return status; } +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) +static bool dcn21_dmcu_init(struct dmcu *dmcu) +{ + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15); + + if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) { + return false; + } + + return dcn10_dmcu_init(dmcu); +} +#endif static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, unsigned int start_offset, @@ -818,6 +839,21 @@ static const struct dmcu_funcs dcn20_funcs = { }; #endif +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) +static const struct dmcu_funcs dcn21_funcs = { + .dmcu_init = dcn21_dmcu_init, + .load_iram = dcn10_dmcu_load_iram, + .set_psr_enable = dcn10_dmcu_set_psr_enable, + .setup_psr = dcn10_dmcu_setup_psr, + .get_psr_state = dcn10_get_dmcu_psr_state, + .set_psr_wait_loop = dcn10_psr_wait_loop, + .get_psr_wait_loop = dcn10_get_psr_wait_loop, + .is_dmcu_initialized = dcn10_is_dmcu_initialized, + .lock_phy = dcn20_lock_phy, + .unlock_phy = dcn20_unlock_phy +}; +#endif + static void dce_dmcu_construct( struct dce_dmcu *dmcu_dce, struct dc_context *ctx, @@ -836,6 +872,26 @@ static void dce_dmcu_construct( dmcu_dce->dmcu_mask = dmcu_mask; } +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) +static void dcn21_dmcu_construct( + struct dce_dmcu *dmcu_dce, + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask) +{ + uint32_t psp_version 
= 0; + + dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); + + if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { + psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); + dmcu_dce->base.auto_load_dmcu = (psp_version > 0x00110029); + dmcu_dce->base.psp_version = psp_version; + } +} +#endif + struct dmcu *dce_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -903,6 +959,29 @@ struct dmcu *dcn20_dmcu_create( } #endif +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) +struct dmcu *dcn21_dmcu_create( + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask) +{ + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); + + if (dmcu_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn21_dmcu_construct( + dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); + + dmcu_dce->base.funcs = &dcn21_funcs; + + return &dmcu_dce->base; +} +#endif + void dce_dmcu_destroy(struct dmcu **dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h index cc8587683b4b..1a42b2cbb21b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h @@ -71,6 +71,10 @@ DMCU_COMMON_REG_LIST_DCE_BASE(), \ SR(DMU_MEM_PWR_CNTL) +#define DMCU_DCN20_REG_LIST()\ + DMCU_DCN10_REG_LIST(), \ + SR(DMCUB_SCRATCH15) + #define DMCU_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -175,6 +179,7 @@ struct dce_dmcu_registers { uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK; uint32_t SMU_INTERRUPT_CONTROL; uint32_t DC_DMCU_SCRATCH; + uint32_t DMCUB_SCRATCH15; }; struct dce_dmcu { @@ -269,6 +274,14 @@ struct dmcu *dcn20_dmcu_create( const struct dce_dmcu_mask *dmcu_mask); #endif +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) +struct dmcu *dcn21_dmcu_create( + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask); +#endif + void dce_dmcu_destroy(struct dmcu **dmcu); static const uint32_t abm_gain_stepsize = 0x0060; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index eb91432621ab..32844cd50d09 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1285,7 +1285,7 @@ static void dcn10_init_hw(struct dc *dc) abm->funcs->abm_init(abm); } - if (dmcu != NULL) + if (dmcu != NULL && !dmcu->auto_load_dmcu) dmcu->funcs->dmcu_init(dmcu); if (abm != NULL && dmcu != NULL) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 1042197f1859..ae1a250c2f7d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -350,7 +350,7 @@ static const struct bios_registers bios_regs = { }; static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCN10_REG_LIST() + DMCU_DCN20_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { @@ -1727,7 +1727,7 @@ static bool construct( goto create_fail; } - pool->base.dmcu = dcn20_dmcu_create(ctx, + pool->base.dmcu = dcn21_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index 
c68f0ce346c7..5315f1f86b21 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -52,6 +52,8 @@ struct dmcu { enum dmcu_state dmcu_state; struct dmcu_version dmcu_version; unsigned int cached_wait_loop_number; + uint32_t psp_version; + bool auto_load_dmcu; }; struct dmcu_funcs { -- cgit From 28bcdb8ee9a47fd03faf5da19015a2d7ea907585 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Thu, 17 Oct 2019 21:44:50 -0400 Subject: drm/amd/display: Add PSP FW version mask. [Why] PSP version format is AB.CD.EF.GH, where CD and GH is the main version. current psp version check for dmcub loading dmcu check 0x00110029, in case of some psp version eg: 0x00110227 which main version should be 0x00110027, will result in unexpeceted dmcub loading dmcu FW. [How] Add psp version mask 0x00FF00FF for checking version. Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 3417100d51e4..3276944e6997 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -886,7 +886,7 @@ static void dcn21_dmcu_construct( if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); - dmcu_dce->base.auto_load_dmcu = (psp_version > 0x00110029); + dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); dmcu_dce->base.psp_version = psp_version; } } -- cgit From 743b9786b14ae0d7d13b3782dccad158e577e9bb Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 24 Oct 2019 20:38:48 -0400 Subject: drm/amd/display: Hook up the DMUB service in DM [Why] We need DMCUB on Renoir to support DMCU and PHY initialization. The DMUB service provides a mechanism to load the DMCUB. [How] Include the DMUB service in amdgpu_dm. Frontdoor loading of the DMCUB firmware needs to happen via PSP. To pass the firmware to PSP we need to hand it off to the firmware list in the base driver during software initialization. Most of the DMUB service can technically be initialized at this point in time, but we don't want to be allocating framebuffer memory for hardware that doesn't support the DMCUB and in order to check that we need to be able to read registers - something DM helpers aren't setup to do in software initialization. So everything but the service creation itself will get deferred to hardware initialization. 
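As a rough sketch of that split (illustrative only, with hypothetical helper names; the real
hooks added by this patch are dm_dmub_sw_init() and dm_dmub_hw_init() below):

	/* Software init: registers cannot be read yet, so only create the
	 * service object and hand the firmware to PSP for frontdoor loading. */
	static int example_dmub_sw_init(struct amdgpu_device *adev)
	{
		adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
		if (!adev->dm.dmub_srv)
			return -ENOMEM;

		/* request_firmware_direct() and queue it on adev->firmware.ucode[] here */
		return 0;
	}

	/* Hardware init: registers are accessible, so check for DMCUB support
	 * before committing framebuffer memory to its regions. */
	static int example_dmub_hw_init(struct amdgpu_device *adev)
	{
		bool has_hw_support = false;

		dmub_srv_has_hw_support(adev->dm.dmub_srv, &has_hw_support);
		if (!has_hw_support)
			return 0; /* nothing to allocate on this ASIC */

		/* region sizing, framebuffer allocation, firmware copy and
		 * dmub_srv_hw_init() only happen from this point on. */
		return 0;
	}
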
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 267 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 50 ++++ 2 files changed, 317 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6ec8473f2cbd..26b61845b81a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -30,6 +30,11 @@ #include "dc.h" #include "dc/inc/core_types.h" #include "dal_asic_id.h" +#ifdef CONFIG_DRM_AMD_DC_DMUB +#include "dmub/inc/dmub_srv.h" +#include "dc/inc/hw/dmcu.h" +#include "dc/inc/hw/abm.h" +#endif #include "vid.h" #include "amdgpu.h" @@ -87,6 +92,10 @@ #include "modules/power/power_helpers.h" #include "modules/inc/mod_info_packet.h" +#ifdef CONFIG_DRM_AMD_DC_DMUB +#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); +#endif #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); @@ -667,12 +676,149 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) } } +#ifdef CONFIG_DRM_AMD_DC_DMUB +static int dm_dmub_hw_init(struct amdgpu_device *adev) +{ + const unsigned int psp_header_bytes = 0x100; + const unsigned int psp_footer_bytes = 0x100; + const struct dmcub_firmware_header_v1_0 *hdr; + struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + const struct firmware *dmub_fw = adev->dm.dmub_fw; + struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; + struct abm *abm = adev->dm.dc->res_pool->abm; + struct dmub_srv_region_params region_params; + struct dmub_srv_region_info region_info; + struct dmub_srv_fb_params fb_params; + struct dmub_srv_fb_info fb_info; + struct dmub_srv_hw_params hw_params; + enum dmub_status status; + const unsigned char *fw_inst_const, *fw_bss_data; + uint32_t i; + int r; + bool has_hw_support; + + if (!dmub_srv) + /* DMUB isn't supported on the ASIC. */ + return 0; + + if (!dmub_fw) { + /* Firmware required for DMUB support. */ + DRM_ERROR("No firmware provided for DMUB.\n"); + return -EINVAL; + } + + status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error checking HW support for DMUB: %d\n", status); + return -EINVAL; + } + + if (!has_hw_support) { + DRM_INFO("DMUB unsupported on ASIC\n"); + return 0; + } + + hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; + + /* Calculate the size of all the regions for the DMUB service. */ + memset(®ion_params, 0, sizeof(region_params)); + + region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - + psp_header_bytes - psp_footer_bytes; + region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); + region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size; + + status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, + ®ion_info); + + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error calculating DMUB region info: %d\n", status); + return -EINVAL; + } + + /* + * Allocate a framebuffer based on the total size of all the regions. + * TODO: Move this into GART. + */ + r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, + &adev->dm.dmub_bo_gpu_addr, + &adev->dm.dmub_bo_cpu_addr); + if (r) + return r; + + /* Rebase the regions on the framebuffer address. 
*/ + memset(&fb_params, 0, sizeof(fb_params)); + fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; + fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; + fb_params.region_info = ®ion_info; + + status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error calculating DMUB FB info: %d\n", status); + return -EINVAL; + } + + fw_inst_const = dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + psp_header_bytes; + + fw_bss_data = dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + le32_to_cpu(hdr->inst_const_bytes); + + /* Copy firmware and bios info into FB memory. */ + memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, + region_params.inst_const_size); + memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, + region_params.bss_data_size); + memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr, + adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size); + + /* Initialize hardware. */ + memset(&hw_params, 0, sizeof(hw_params)); + hw_params.fb_base = adev->gmc.fb_start; + hw_params.fb_offset = adev->gmc.aper_base; + + if (dmcu) + hw_params.psp_version = dmcu->psp_version; + + for (i = 0; i < fb_info.num_fb; ++i) + hw_params.fb[i] = &fb_info.fb[i]; + + status = dmub_srv_hw_init(dmub_srv, &hw_params); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error initializing DMUB HW: %d\n", status); + return -EINVAL; + } + + /* Wait for firmware load to finish. */ + status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); + if (status != DMUB_STATUS_OK) + DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + + /* Init DMCU and ABM if available. */ + if (dmcu && abm) { + dmcu->funcs->dmcu_init(dmcu); + abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); + } + + DRM_INFO("DMUB hardware initialized: version=0x%08X\n", + adev->dm.dmcub_fw_version); + + return 0; +} + +#endif static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; #ifdef CONFIG_DRM_AMD_DC_HDCP struct dc_callback_init init_params; #endif +#ifdef CONFIG_DRM_AMD_DC_DMUB + int r; +#endif adev->dm.ddev = adev->ddev; adev->dm.adev = adev; @@ -749,6 +895,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); +#ifdef CONFIG_DRM_AMD_DC_DMUB + r = dm_dmub_hw_init(adev); + if (r) { + DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + goto error; + } + +#endif adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -821,6 +975,12 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) if (adev->dm.dc) dc_deinit_callbacks(adev->dm.dc); #endif +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (adev->dm.dmub_bo) + amdgpu_bo_free_kernel(&adev->dm.dmub_bo, + &adev->dm.dmub_bo_gpu_addr, + &adev->dm.dmub_bo_cpu_addr); +#endif /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) @@ -932,9 +1092,104 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; } +#ifdef CONFIG_DRM_AMD_DC_DMUB +static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) +{ + struct amdgpu_device *adev = ctx; + + return dm_read_reg(adev->dm.dc->ctx, address); +} + +static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, + uint32_t value) +{ + struct amdgpu_device *adev = ctx; + + return dm_write_reg(adev->dm.dc->ctx, address, value); +} + +static int dm_dmub_sw_init(struct amdgpu_device *adev) +{ + struct dmub_srv_create_params create_params; + const struct dmcub_firmware_header_v1_0 *hdr; + const char 
*fw_name_dmub; + enum dmub_asic dmub_asic; + enum dmub_status status; + int r; + + switch (adev->asic_type) { + case CHIP_RENOIR: + dmub_asic = DMUB_ASIC_DCN21; + fw_name_dmub = FIRMWARE_RENOIR_DMUB; + break; + + default: + /* ASIC doesn't support DMUB. */ + return 0; + } + + adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); + if (!adev->dm.dmub_srv) { + DRM_ERROR("Failed to allocate DMUB service!\n"); + return -ENOMEM; + } + + memset(&create_params, 0, sizeof(create_params)); + create_params.user_ctx = adev; + create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; + create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; + create_params.asic = dmub_asic; + + status = dmub_srv_create(adev->dm.dmub_srv, &create_params); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error creating DMUB service: %d\n", status); + return -EINVAL; + } + + r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); + if (r) { + DRM_ERROR("DMUB firmware loading failed: %d\n", r); + return 0; + } + + r = amdgpu_ucode_validate(adev->dm.dmub_fw); + if (r) { + DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); + return 0; + } + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + DRM_WARN("Only PSP firmware loading is supported for DMUB\n"); + return 0; + } + + hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = + AMDGPU_UCODE_ID_DMCUB; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); + + adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); + + DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", + adev->dm.dmcub_fw_version); + + return 0; +} + +#endif static int dm_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; +#ifdef CONFIG_DRM_AMD_DC_DMUB + int r; + + r = dm_dmub_sw_init(adev); + if (r) + return r; + +#endif return load_dmcu_fw(adev); } @@ -943,6 +1198,18 @@ static int dm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (adev->dm.dmub_srv) { + dmub_srv_destroy(adev->dm.dmub_srv); + adev->dm.dmub_srv = NULL; + } + + if (adev->dm.dmub_fw) { + release_firmware(adev->dm.dmub_fw); + adev->dm.dmub_fw = NULL; + } + +#endif if(adev->dm.fw_dmcu) { release_firmware(adev->dm.fw_dmcu); adev->dm.fw_dmcu = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d26bb591cefe..c636a958e377 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -57,6 +57,10 @@ struct amdgpu_device; struct drm_device; struct amdgpu_dm_irq_handler_data; struct dc; +#ifdef CONFIG_DRM_AMD_DC_DMUB +struct amdgpu_bo; +struct dmub_srv; +#endif struct common_irq_params { struct amdgpu_device *adev; @@ -121,6 +125,52 @@ struct amdgpu_display_manager { struct dc *dc; +#ifdef CONFIG_DRM_AMD_DC_DMUB + /** + * @dmub_srv: + * + * DMUB service, used for controlling the DMUB on hardware + * that supports it. The pointer to the dmub_srv will be + * NULL on hardware that does not support it. + */ + struct dmub_srv *dmub_srv; + + /** + * @dmub_fw: + * + * DMUB firmware, required on hardware that has DMUB support. + */ + const struct firmware *dmub_fw; + + /** + * @dmub_bo: + * + * Buffer object for the DMUB. 
+ */ + struct amdgpu_bo *dmub_bo; + + /** + * @dmub_bo_gpu_addr: + * + * GPU virtual address for the DMUB buffer object. + */ + u64 dmub_bo_gpu_addr; + + /** + * @dmub_bo_cpu_addr: + * + * CPU address for the DMUB buffer object. + */ + void *dmub_bo_cpu_addr; + + /** + * @dmcub_fw_version: + * + * DMCUB firmware version. + */ + uint32_t dmcub_fw_version; + +#endif /** * @cgs_device: * -- cgit From 976e51a7c0827a870fa08df6ee0a74e937ebbbd9 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 25 Oct 2019 14:15:08 -0400 Subject: drm/amdgpu: Add DMCUB to firmware query interface The DMCUB firmware version can be read using the AMDGPU_INFO ioctl or the amdgpu_firmware_info debugfs entry. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 82c46c4faaad..3827dcf7e48d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -293,6 +293,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->ver = adev->dm.dmcu_fw_version; fw_info->feature = 0; break; + case AMDGPU_INFO_FW_DMCUB: + fw_info->ver = adev->dm.dmcub_fw_version; + fw_info->feature = 0; + break; default: return -EINVAL; } @@ -1390,6 +1394,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + /* DMCUB */ + query_fw.fw_type = AMDGPU_INFO_FW_DMCUB; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); -- cgit From 3a1627b07385a6bb497f7ca4e2ffe1e1dbc70b68 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 25 Oct 2019 15:03:58 -0400 Subject: drm/amd/display: Add DMUB support to DC DC will use DMUB for command submission and flow control during initialization. Register offloading as well as submitting some BIOS commands are part of the DC internal interface but are guarded behind debug options. It won't be functional in amdgpu_dm yet since we don't pass the DMUB service to DC for use. 
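The submission model is queue, then execute, then wait for idle against the DMUB inbox
ring buffer. A trimmed usage sketch, mirroring encoder_control_dmcub() in the diff below:

	struct dmub_rb_cmd_digx_encoder_control cmd = { 0 };

	cmd.header.type = DMUB_CMD__DIGX_ENCODER_CONTROL;
	/* fill cmd.encoder_control.dig.stream_param before queueing */

	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd.header);	/* push onto the ring buffer */
	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);		/* bump the inbox write pointer */
	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);		/* block until the DMCUB drains it */

This path is only taken when dc->ctx->dmub_srv is non-NULL and dc->debug.dmub_command_table
is set; otherwise the existing VBIOS command table (EXEC_BIOS_CMD_TABLE) is used as before.
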
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/Makefile | 6 +- .../gpu/drm/amd/display/dc/bios/command_table2.c | 91 +++++++ drivers/gpu/drm/amd/display/dc/core/dc.c | 8 + drivers/gpu/drm/amd/display/dc/dc.h | 12 + drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 119 +++++++++ drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 60 +++++ drivers/gpu/drm/amd/display/dc/dc_helper.c | 273 +++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc_types.h | 6 + .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 7 + drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 8 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 11 + .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 3 + drivers/gpu/drm/amd/display/dc/dm_services.h | 14 ++ drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 22 ++ drivers/gpu/drm/amd/display/dc/os_types.h | 1 + 15 files changed, 640 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c create mode 100644 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index a160512a2f04..6fe39f6392c7 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -70,5 +70,9 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o) AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) - +ifdef CONFIG_DRM_AMD_DC_DMUB +DC_DMUB += dc_dmub_srv.o +AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) +AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) +endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index bb2e8105e6ab..a3d890050e39 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -37,6 +37,10 @@ #include "bios_parser_types_internal2.h" #include "amdgpu.h" +#ifdef CONFIG_DRM_AMD_DC_DMUB +#include "dc_dmub_srv.h" +#include "dc.h" +#endif #define DC_LOGGER \ bp->base.ctx->logger @@ -103,6 +107,21 @@ static void init_dig_encoder_control(struct bios_parser *bp) } } +#ifdef CONFIG_DRM_AMD_DC_DMUB +static void encoder_control_dmcub( + struct dc_dmub_srv *dmcub, + struct dig_encoder_stream_setup_parameters_v1_5 *dig) +{ + struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 }; + + encoder_control.header.type = DMUB_CMD__DIGX_ENCODER_CONTROL; + encoder_control.encoder_control.dig.stream_param = *dig; + + dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} +#endif static enum bp_result encoder_control_digx_v1_5( struct bios_parser *bp, struct bp_encoder_control *cntl) @@ -154,6 +173,13 @@ static enum bp_result encoder_control_digx_v1_5( default: break; } +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + encoder_control_dmcub(bp->base.ctx->dmub_srv, ¶ms); + return BP_RESULT_OK; + } +#endif if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params)) result = BP_RESULT_OK; @@ -190,7 +216,21 @@ static void init_transmitter_control(struct bios_parser *bp) break; } } +#ifdef CONFIG_DRM_AMD_DC_DMUB +static void transmitter_control_dmcub( + struct dc_dmub_srv *dmcub, + struct dig_transmitter_control_parameters_v1_6 *dig) +{ + struct dmub_rb_cmd_dig1_transmitter_control transmitter_control; + + 
transmitter_control.header.type = DMUB_CMD__DIG1_TRANSMITTER_CONTROL; + transmitter_control.transmitter_control.dig = *dig; + dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} +#endif static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl) @@ -223,6 +263,14 @@ static enum bp_result transmitter_control_v1_6( } +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param); + return BP_RESULT_OK; + } +#endif + /*color_depth not used any more, driver has deep color factor in the Phyclk*/ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps)) result = BP_RESULT_OK; @@ -255,7 +303,21 @@ static void init_set_pixel_clock(struct bios_parser *bp) } } +#ifdef CONFIG_DRM_AMD_DC_DMUB +static void set_pixel_clock_dmcub( + struct dc_dmub_srv *dmcub, + struct set_pixel_clock_parameter_v1_7 *clk) +{ + struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 }; + pixel_clock.header.type = DMUB_CMD__SET_PIXEL_CLOCK; + pixel_clock.pixel_clock.clk = *clk; + + dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} +#endif static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, @@ -331,6 +393,13 @@ static enum bp_result set_pixel_clock_v7( if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN; +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk); + return BP_RESULT_OK; + } +#endif if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk)) result = BP_RESULT_OK; } @@ -584,7 +653,21 @@ static void init_enable_disp_power_gating( break; } } +#ifdef CONFIG_DRM_AMD_DC_DMUB +static void enable_disp_power_gating_dmcub( + struct dc_dmub_srv *dmcub, + struct enable_disp_power_gating_parameters_v2_1 *pwr) +{ + struct dmub_rb_cmd_enable_disp_power_gating power_gating; + + power_gating.header.type = DMUB_CMD__ENABLE_DISP_POWER_GATING; + power_gating.power_gating.pwr = *pwr; + dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} +#endif static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, @@ -604,6 +687,14 @@ static enum bp_result enable_disp_power_gating_v2_1( ps.param.enable = bp->cmd_helper->disp_power_gating_action_to_atom(action); +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv, + &ps.param); + return BP_RESULT_OK; + } +#endif if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param)) result = BP_RESULT_OK; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 32f31bf91915..2bf4da41577a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -59,6 +59,10 @@ #include "dc_link_dp.h" +#ifdef CONFIG_DRM_AMD_DC_DMUB +#include "dc_dmub_srv.h" +#endif + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" #endif @@ -2406,6 +2410,10 @@ void dc_set_power_state( switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: dc_resource_state_construct(dc, dc->current_state); +#ifdef CONFIG_DRM_AMD_DC_DMUB + if 
(dc->ctx->dmub_srv) + dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); +#endif dc->hwss.init_hw(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0416a17b0897..33828f03fe9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -112,6 +112,9 @@ struct dc_caps { bool disable_dp_clk_share; bool psp_setup_panel_mode; bool extended_aux_timeout_support; +#ifdef CONFIG_DRM_AMD_DC_DMUB + bool dmcub_support; +#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hw_3d_lut; #endif @@ -401,6 +404,11 @@ struct dc_debug_options { unsigned int force_odm_combine; //bit vector based on otg inst unsigned int force_fclk_khz; bool disable_tri_buf; +#ifdef CONFIG_DRM_AMD_DC_DMUB + bool dmub_offload_enabled; + bool dmcub_emulation; + bool dmub_command_table; /* for testing only */ +#endif struct dc_bw_validation_profile bw_val_profile; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_fec; @@ -558,6 +566,10 @@ struct dc_init_data { struct dc_bios *vbios_override; enum dce_environment dce_environment; +#ifdef CONFIG_DRM_AMD_DC_DMUB + struct dmub_offload_funcs *dmub_if; + struct dc_reg_helper_state *dmub_offload; +#endif struct dc_config flags; uint32_t log_mask; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c new file mode 100644 index 000000000000..61cefe0a3790 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -0,0 +1,119 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../dmub/inc/dmub_srv.h" + +static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, + struct dmub_srv *dmub) +{ + dc_srv->dmub = dmub; + dc_srv->ctx = dc->ctx; +} + +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub) +{ + struct dc_dmub_srv *dc_srv = + kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL); + + if (dc_srv == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dc_dmub_srv_construct(dc_srv, dc, dmub); + + return dc_srv; +} + +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) +{ + if (*dmub_srv) { + kfree(*dmub_srv); + *dmub_srv = NULL; + } +} + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, + struct dmub_cmd_header *cmd) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_cmd_queue(dmub, cmd); + if (status == DMUB_STATUS_OK) + return; + + if (status != DMUB_STATUS_QUEUE_FULL) + goto error; + + /* Execute and wait for queue to become empty again. */ + dc_dmub_srv_cmd_execute(dc_dmub_srv); + dc_dmub_srv_wait_idle(dc_dmub_srv); + + /* Requeue the command. */ + status = dmub_srv_cmd_queue(dmub, cmd); + if (status == DMUB_STATUS_OK) + return; + +error: + DC_ERROR("Error queuing DMUB command: status=%d\n", status); +} + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_cmd_execute(dmub); + if (status != DMUB_STATUS_OK) + DC_ERROR("Error starting DMUB exeuction: status=%d\n", status); +} + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_wait_for_idle(dmub, 100000); + if (status != DMUB_STATUS_OK) + DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); +} + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_wait_for_phy_init(dmub, 1000000); + if (status != DMUB_STATUS_OK) + DC_ERROR("Error waiting for DMUB phy init: status=%d\n", + status); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h new file mode 100644 index 000000000000..754b6077539c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -0,0 +1,60 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DC_SRV_H_ +#define _DMUB_DC_SRV_H_ + +#include "os_types.h" +#include "../dmub/inc/dmub_cmd.h" + +struct dmub_srv; +struct dmub_cmd_header; + +struct dc_reg_helper_state { + bool gather_in_progress; + uint32_t same_addr_count; + bool should_burst_write; + union dmub_rb_cmd cmd_data; + unsigned int reg_seq_count; +}; + +struct dc_dmub_srv { + struct dmub_srv *dmub; + struct dc_reg_helper_state reg_helper_offload; + + struct dc_context *ctx; + void *dm; +}; + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, + struct dmub_cmd_header *cmd); + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); + +#endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 30b2f9edd42f..59b4bbefb7a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -32,6 +32,76 @@ #include "dm_services.h" #include +#ifdef CONFIG_DRM_AMD_DC_DMUB +#include "dc.h" +#include "dc_dmub_srv.h" + +static inline void submit_dmub_read_modify_write( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; + bool gather = false; + + offload->should_burst_write = + (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); + cmd_buf->header.payload_bytes = + sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + + offload->reg_seq_count = 0; + offload->same_addr_count = 0; +} + +static inline void submit_dmub_burst_write( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; + bool gather = false; + + cmd_buf->header.payload_bytes = + sizeof(uint32_t) * offload->reg_seq_count; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + + offload->reg_seq_count = 0; +} + +static inline void submit_dmub_reg_wait( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; + bool gather = false; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + offload->reg_seq_count = 0; + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; +} +#endif + struct dc_reg_value_masks { uint32_t value; uint32_t mask; @@ 
-77,6 +147,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask, } } +#ifdef CONFIG_DRM_AMD_DC_DMUB +static void dmub_flush_buffer_execute( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + submit_dmub_read_modify_write(offload, ctx); + dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static void dmub_flush_burst_write_buffer_execute( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + submit_dmub_burst_write(offload, ctx); + dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, + uint32_t reg_val) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; + + /* flush command if buffer is full */ + if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX) + dmub_flush_burst_write_buffer_execute(offload, ctx); + + if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE && + addr != cmd_buf->addr) { + dmub_flush_burst_write_buffer_execute(offload, ctx); + return false; + } + + cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE; + cmd_buf->addr = addr; + cmd_buf->write_values[offload->reg_seq_count] = reg_val; + offload->reg_seq_count++; + + return true; +} + +static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr, + struct dc_reg_value_masks *field_value_mask) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; + struct dmub_cmd_read_modify_write_sequence *seq; + + /* flush command if buffer is full */ + if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE && + offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX) + dmub_flush_buffer_execute(offload, ctx); + + if (offload->should_burst_write) { + if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value)) + return field_value_mask->value; + else + offload->should_burst_write = false; + } + + /* pack commands */ + cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE; + seq = &cmd_buf->seq[offload->reg_seq_count]; + + if (offload->reg_seq_count) { + if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr) + offload->same_addr_count++; + else + offload->same_addr_count = 0; + } + + seq->addr = addr; + seq->modify_mask = field_value_mask->mask; + seq->modify_value = field_value_mask->value; + offload->reg_seq_count++; + + return field_value_mask->value; +} + +static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr, + uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; + + cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT; + cmd_buf->reg_wait.addr = addr; + cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift); + cmd_buf->reg_wait.mask = mask; + cmd_buf->reg_wait.time_out_us = time_out_us; +} + +#endif + uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, @@ -93,6 +257,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, va_end(ap); +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) + return dmub_reg_value_pack(ctx, 
addr, &field_value_mask); + /* todo: return void so we can decouple code running in driver from register states */ +#endif + /* mmio write directly */ reg_val = dm_read_reg(ctx, addr); reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; @@ -118,6 +289,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx, /* mmio write directly */ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + return dmub_reg_value_burst_set_pack(ctx, addr, reg_val); + /* todo: return void so we can decouple code running in driver from register states */ + } +#endif dm_write_reg(ctx, addr, reg_val); return reg_val; } @@ -134,6 +312,16 @@ uint32_t dm_read_reg_func( return 0; } #endif + +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress && + !ctx->dmub_srv->reg_helper_offload.should_burst_write) { + ASSERT(false); + return 0; + } +#endif + value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); @@ -299,6 +487,15 @@ void generic_reg_wait(const struct dc_context *ctx, uint32_t reg_val; int i; +#ifdef CONFIG_DRM_AMD_DC_DMUB + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value, + delay_between_poll_us * time_out_num_tries); + return; + } +#endif + /* something is terribly wrong if time out is > 200ms. (5Hz) */ ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000); @@ -345,6 +542,13 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx, uint32_t index) { uint32_t value = 0; +#ifdef CONFIG_DRM_AMD_DC_DMUB + // when reg read, there should not be any offload. + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + ASSERT(false); + } +#endif dm_write_reg(ctx, addr_index, index); value = dm_read_reg(ctx, addr_data); @@ -382,3 +586,72 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, return reg_val; } + +#ifdef CONFIG_DRM_AMD_DC_DMUB +void reg_sequence_start_gather(const struct dc_context *ctx) +{ + /* if reg sequence is supported and enabled, set flag to + * indicate we want to have REG_SET, REG_UPDATE macro build + * reg sequence command buffer rather than MMIO directly. + */ + + if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) { + struct dc_reg_helper_state *offload = + &ctx->dmub_srv->reg_helper_offload; + + /* caller sequence mismatch. need to debug caller. offload will not work!!! 
*/ + ASSERT(!offload->gather_in_progress); + + offload->gather_in_progress = true; + } +} + +void reg_sequence_start_execute(const struct dc_context *ctx) +{ + struct dc_reg_helper_state *offload; + + if (!ctx->dmub_srv) + return; + + offload = &ctx->dmub_srv->reg_helper_offload; + + if (offload && offload->gather_in_progress) { + offload->gather_in_progress = false; + offload->should_burst_write = false; + switch (offload->cmd_data.cmd_common.header.type) { + case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE: + submit_dmub_read_modify_write(offload, ctx); + break; + case DMUB_CMD__REG_REG_WAIT: + submit_dmub_reg_wait(offload, ctx); + break; + case DMUB_CMD__REG_SEQ_BURST_WRITE: + submit_dmub_burst_write(offload, ctx); + break; + default: + return; + } + + dc_dmub_srv_cmd_execute(ctx->dmub_srv); + } +} + +void reg_sequence_wait_done(const struct dc_context *ctx) +{ + /* callback to DM to poll for last submission done*/ + struct dc_reg_helper_state *offload; + + if (!ctx->dmub_srv) + return; + + offload = &ctx->dmub_srv->reg_helper_offload; + + if (offload && + ctx->dc->debug.dmub_offload_enabled && + !ctx->dc->debug.dmcub_emulation) { + dc_dmub_srv_wait_idle(ctx->dmub_srv); + } +} + + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index d9be8fc3889f..fb70ed9b351f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -48,6 +48,9 @@ struct dc_stream_state; struct dc_link; struct dc_sink; struct dal; +#ifdef CONFIG_DRM_AMD_DC_DMUB +struct dc_dmub_srv; +#endif /******************************** * Environment definitions @@ -109,6 +112,9 @@ struct dc_context { uint32_t dc_sink_id_count; uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; +#ifdef CONFIG_DRM_AMD_DC_DMUB + struct dc_dmub_srv *dmub_srv; +#endif #ifdef CONFIG_DRM_AMD_DC_HDCP struct cp_psp cp_psp; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index aa0c7a7d13a0..41a0e53d2ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -352,6 +352,9 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, uint32_t i; struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); +#ifdef CONFIG_DRM_AMD_DC_DMUB + REG_SEQ_START(); +#endif for (i = 0 ; i < num; i++) { REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); @@ -630,6 +633,10 @@ void dpp1_set_degamma( BREAK_TO_DEBUGGER(); break; } +#ifdef CONFIG_DRM_AMD_DC_DMUB + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); +#endif } void dpp1_degamma_ram_select( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index dabccbd49ad4..8710f3ac2abf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -457,11 +457,19 @@ static bool optc1_enable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 1); +#ifdef CONFIG_DRM_AMD_DC_DMUB + REG_SEQ_START(); +#endif /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 3, OTG_MASTER_EN, 1); +#ifdef CONFIG_DRM_AMD_DC_DMUB + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); +#endif + return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 5a188b2bc033..2417d933ef2b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -345,6 +345,11 @@ static void mpc20_program_ogam_pwl( uint32_t i; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); +#ifdef CONFIG_DRM_AMD_DC_DMUB + PERF_TRACE(); + REG_SEQ_START(); +#endif + for (i = 0 ; i < num; i++) { REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); @@ -463,6 +468,12 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } +#ifdef CONFIG_DRM_AMD_DC_DMUB + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); +#endif } static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index ae1a250c2f7d..eb7ce4a5cbac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1678,6 +1678,9 @@ static bool construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; +#ifdef CONFIG_DRM_AMD_DC_DMUB + dc->caps.dmcub_support = true; +#endif if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 1a0429744630..0a3891edfd94 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,6 +40,11 @@ #undef DEPRECATED +#ifdef CONFIG_DRM_AMD_DC_DMUB +struct dmub_srv; +struct dc_dmub_srv; + +#endif irq_handler_idx dm_register_interrupt( struct dc_context *ctx, struct dc_interrupt_params *int_params, @@ -139,6 +144,15 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); +#ifdef CONFIG_DRM_AMD_DC_DMUB +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub); +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv); + +void reg_sequence_start_gather(const struct dc_context *ctx); +void reg_sequence_start_execute(const struct dc_context *ctx); +void reg_sequence_wait_done(const struct dc_context *ctx); +#endif + #define FD(reg_field) reg_field ## __SHIFT, \ reg_field ## _MASK diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index 8503d9cc4763..a9a9657c095a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -485,4 +485,26 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); + +#ifdef CONFIG_DRM_AMD_DC_DMUB +/* register offload macros + * + * instead of MMIO to register directly, in some cases we want + * to gather register sequence and execute the register sequence + * from another thread so we optimize time required for lengthy ops + */ + +/* start gathering register sequence */ +#define REG_SEQ_START() \ + reg_sequence_start_gather(CTX) + +/* start execution of register sequence gathered since REG_SEQ_START */ +#define REG_SEQ_SUBMIT() \ + reg_sequence_start_execute(CTX) + +/* wait for the last REG_SEQ_SUBMIT to finish */ +#define REG_SEQ_WAIT_DONE() \ + reg_sequence_wait_done(CTX) +#endif + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */ diff --git 
a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 30ec80ac6fc8..9a4b5bab03c6 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -29,6 +29,7 @@ #include #include #include +#include #include -- cgit From 9a71c7d31734f74549ad2bcd652c403c71e7c8d1 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 28 Oct 2019 09:07:30 -0400 Subject: drm/amd/display: Register DMUB service with DC [Why] DC can utilize the DMUB server to send commands to the DMUB but it's the DM responsibility to pass it the service to use. [How] Create the dc_dmub_srv after we finish initializing the dmub_srv. Cleanup the dc_dmub_srv before destroying the dmub_srv or dc. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 26b61845b81a..ee6ea333ba3d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -34,6 +34,7 @@ #include "dmub/inc/dmub_srv.h" #include "dc/inc/hw/dmcu.h" #include "dc/inc/hw/abm.h" +#include "dc/dc_dmub_srv.h" #endif #include "vid.h" @@ -803,6 +804,12 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); } + adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); + if (!adev->dm.dc->ctx->dmub_srv) { + DRM_ERROR("Couldn't allocate DC DMUB server!\n"); + return -ENOMEM; + } + DRM_INFO("DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); @@ -976,6 +983,11 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) dc_deinit_callbacks(adev->dm.dc); #endif #ifdef CONFIG_DRM_AMD_DC_DMUB + if (adev->dm.dc->ctx->dmub_srv) { + dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); + adev->dm.dc->ctx->dmub_srv = NULL; + } + if (adev->dm.dmub_bo) amdgpu_bo_free_kernel(&adev->dm.dmub_bo, &adev->dm.dmub_bo_gpu_addr, -- cgit From 2200eb9e1819aabb9a1c24d5f6b132f389a2e8a9 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 28 Oct 2019 09:22:34 -0400 Subject: drm/amd/display: Drop CONFIG_DRM_AMD_DC_DMUB guards [Why] Support for DMUB only depends on support for DC. It doesn't use floating point so we don't need to guard it by any specific DCN revision. [How] Drop the guards and cleanup the newlines around each one. 
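As a rough illustration of the cleanup (the exact hunks follow in the diff below), the pattern being removed looks like the following; the REG_SEQ_START() call site is taken from the dcn10_dpp_cm.c hunk further down and stands in for all the affected call sites:

	/* Before this patch: DMUB call sites were conditionally compiled. */
	#ifdef CONFIG_DRM_AMD_DC_DMUB
		REG_SEQ_START();
	#endif

	/* After this patch: the guard is dropped and the call is always built,
	 * since DMUB support only depends on DC itself. */
		REG_SEQ_START();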
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 6 ----- drivers/gpu/drm/amd/display/Makefile | 12 +++------- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 20 +--------------- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 4 ---- drivers/gpu/drm/amd/display/dc/Makefile | 3 --- .../gpu/drm/amd/display/dc/bios/command_table2.c | 27 +++++++--------------- drivers/gpu/drm/amd/display/dc/core/dc.c | 6 +---- drivers/gpu/drm/amd/display/dc/dc.h | 7 +----- drivers/gpu/drm/amd/display/dc/dc_helper.c | 22 +++--------------- drivers/gpu/drm/amd/display/dc/dc_types.h | 5 +--- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 6 ++--- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 5 +--- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 5 +--- .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 -- drivers/gpu/drm/amd/display/dc/dm_services.h | 4 ---- drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 3 --- drivers/gpu/drm/amd/display/dmub/src/Makefile | 2 -- 17 files changed, 22 insertions(+), 117 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index fced39e229d5..313183b80032 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -30,7 +30,6 @@ config DRM_AMD_DC_DCN2_1 bool "DCN 2.1 family" depends on DRM_AMD_DC && X86 depends on DRM_AMD_DC_DCN2_0 - select DRM_AMD_DC_DMUB help Choose this option if you want to have Renoir support for display engine @@ -53,11 +52,6 @@ config DRM_AMD_DC_HDCP if you want to support HDCP authentication -config DRM_AMD_DC_DMUB - def_bool n - help - DMUB support for display engine - config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" depends on DRM_AMD_DC diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 3c7332be4a89..2633de77de5e 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -34,27 +34,21 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc + ifdef CONFIG_DRM_AMD_DC_HDCP subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp endif -ifdef CONFIG_DRM_AMD_DC_DMUB -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc -endif - #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 -DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power +DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power dmub/src ifdef CONFIG_DRM_AMD_DC_HDCP DAL_LIBS += modules/hdcp endif -ifdef CONFIG_DRM_AMD_DC_DMUB -DAL_LIBS += dmub/src -endif - AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) include $(AMD_DAL) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ee6ea333ba3d..8e68fec5cc33 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -30,12 +30,10 @@ #include "dc.h" #include "dc/inc/core_types.h" #include "dal_asic_id.h" -#ifdef CONFIG_DRM_AMD_DC_DMUB #include "dmub/inc/dmub_srv.h" #include "dc/inc/hw/dmcu.h" 
#include "dc/inc/hw/abm.h" #include "dc/dc_dmub_srv.h" -#endif #include "vid.h" #include "amdgpu.h" @@ -93,10 +91,9 @@ #include "modules/power/power_helpers.h" #include "modules/inc/mod_info_packet.h" -#ifdef CONFIG_DRM_AMD_DC_DMUB #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); -#endif + #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); @@ -677,7 +674,6 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) } } -#ifdef CONFIG_DRM_AMD_DC_DMUB static int dm_dmub_hw_init(struct amdgpu_device *adev) { const unsigned int psp_header_bytes = 0x100; @@ -816,16 +812,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } -#endif static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; #ifdef CONFIG_DRM_AMD_DC_HDCP struct dc_callback_init init_params; #endif -#ifdef CONFIG_DRM_AMD_DC_DMUB int r; -#endif adev->dm.ddev = adev->ddev; adev->dm.adev = adev; @@ -902,14 +895,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); -#ifdef CONFIG_DRM_AMD_DC_DMUB r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); goto error; } -#endif adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -982,7 +973,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) if (adev->dm.dc) dc_deinit_callbacks(adev->dm.dc); #endif -#ifdef CONFIG_DRM_AMD_DC_DMUB if (adev->dm.dc->ctx->dmub_srv) { dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); adev->dm.dc->ctx->dmub_srv = NULL; @@ -992,7 +982,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&adev->dm.dmub_bo, &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); -#endif /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) @@ -1104,7 +1093,6 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; } -#ifdef CONFIG_DRM_AMD_DC_DMUB static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) { struct amdgpu_device *adev = ctx; @@ -1190,19 +1178,15 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return 0; } -#endif static int dm_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; -#ifdef CONFIG_DRM_AMD_DC_DMUB int r; r = dm_dmub_sw_init(adev); if (r) return r; -#endif - return load_dmcu_fw(adev); } @@ -1210,7 +1194,6 @@ static int dm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; -#ifdef CONFIG_DRM_AMD_DC_DMUB if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; @@ -1221,7 +1204,6 @@ static int dm_sw_fini(void *handle) adev->dm.dmub_fw = NULL; } -#endif if(adev->dm.fw_dmcu) { release_firmware(adev->dm.fw_dmcu); adev->dm.fw_dmcu = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index c636a958e377..ab5da59aadc1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -57,10 +57,8 @@ struct amdgpu_device; struct drm_device; struct amdgpu_dm_irq_handler_data; struct dc; -#ifdef CONFIG_DRM_AMD_DC_DMUB struct amdgpu_bo; struct dmub_srv; -#endif struct common_irq_params { struct amdgpu_device *adev; @@ -125,7 +123,6 @@ struct amdgpu_display_manager { struct dc *dc; -#ifdef CONFIG_DRM_AMD_DC_DMUB /** * @dmub_srv: * @@ -170,7 +167,6 @@ struct amdgpu_display_manager { */ uint32_t 
dmcub_fw_version; -#endif /** * @cgs_device: * diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 6fe39f6392c7..90482b158283 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -70,9 +70,6 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o) AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) -ifdef CONFIG_DRM_AMD_DC_DMUB DC_DMUB += dc_dmub_srv.o AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) -endif - diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index a3d890050e39..1836f16bb7fe 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -37,10 +37,8 @@ #include "bios_parser_types_internal2.h" #include "amdgpu.h" -#ifdef CONFIG_DRM_AMD_DC_DMUB #include "dc_dmub_srv.h" #include "dc.h" -#endif #define DC_LOGGER \ bp->base.ctx->logger @@ -107,7 +105,6 @@ static void init_dig_encoder_control(struct bios_parser *bp) } } -#ifdef CONFIG_DRM_AMD_DC_DMUB static void encoder_control_dmcub( struct dc_dmub_srv *dmcub, struct dig_encoder_stream_setup_parameters_v1_5 *dig) @@ -121,7 +118,7 @@ static void encoder_control_dmcub( dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); } -#endif + static enum bp_result encoder_control_digx_v1_5( struct bios_parser *bp, struct bp_encoder_control *cntl) @@ -173,13 +170,12 @@ static enum bp_result encoder_control_digx_v1_5( default: break; } -#ifdef CONFIG_DRM_AMD_DC_DMUB + if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { encoder_control_dmcub(bp->base.ctx->dmub_srv, ¶ms); return BP_RESULT_OK; } -#endif if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params)) result = BP_RESULT_OK; @@ -216,7 +212,7 @@ static void init_transmitter_control(struct bios_parser *bp) break; } } -#ifdef CONFIG_DRM_AMD_DC_DMUB + static void transmitter_control_dmcub( struct dc_dmub_srv *dmcub, struct dig_transmitter_control_parameters_v1_6 *dig) @@ -230,7 +226,7 @@ static void transmitter_control_dmcub( dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); } -#endif + static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl) @@ -262,14 +258,11 @@ static enum bp_result transmitter_control_v1_6( __func__, ps.param.symclk_10khz); } - -#ifdef CONFIG_DRM_AMD_DC_DMUB if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param); return BP_RESULT_OK; } -#endif /*color_depth not used any more, driver has deep color factor in the Phyclk*/ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps)) @@ -303,7 +296,6 @@ static void init_set_pixel_clock(struct bios_parser *bp) } } -#ifdef CONFIG_DRM_AMD_DC_DMUB static void set_pixel_clock_dmcub( struct dc_dmub_srv *dmcub, struct set_pixel_clock_parameter_v1_7 *clk) @@ -317,7 +309,6 @@ static void set_pixel_clock_dmcub( dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); } -#endif static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, @@ -393,13 +384,12 @@ static enum bp_result set_pixel_clock_v7( if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN; -#ifdef CONFIG_DRM_AMD_DC_DMUB if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { 
set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk); return BP_RESULT_OK; } -#endif + if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk)) result = BP_RESULT_OK; } @@ -653,7 +643,7 @@ static void init_enable_disp_power_gating( break; } } -#ifdef CONFIG_DRM_AMD_DC_DMUB + static void enable_disp_power_gating_dmcub( struct dc_dmub_srv *dmcub, struct enable_disp_power_gating_parameters_v2_1 *pwr) @@ -667,7 +657,7 @@ static void enable_disp_power_gating_dmcub( dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); } -#endif + static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, @@ -687,14 +677,13 @@ static enum bp_result enable_disp_power_gating_v2_1( ps.param.enable = bp->cmd_helper->disp_power_gating_action_to_atom(action); -#ifdef CONFIG_DRM_AMD_DC_DMUB if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv, &ps.param); return BP_RESULT_OK; } -#endif + if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param)) result = BP_RESULT_OK; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2bf4da41577a..0a5f91ab4652 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -58,10 +58,7 @@ #include "hubp.h" #include "dc_link_dp.h" - -#ifdef CONFIG_DRM_AMD_DC_DMUB #include "dc_dmub_srv.h" -#endif #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" @@ -2410,10 +2407,9 @@ void dc_set_power_state( switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: dc_resource_state_construct(dc, dc->current_state); -#ifdef CONFIG_DRM_AMD_DC_DMUB + if (dc->ctx->dmub_srv) dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); -#endif dc->hwss.init_hw(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 33828f03fe9e..30a2783881d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -112,9 +112,7 @@ struct dc_caps { bool disable_dp_clk_share; bool psp_setup_panel_mode; bool extended_aux_timeout_support; -#ifdef CONFIG_DRM_AMD_DC_DMUB bool dmcub_support; -#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hw_3d_lut; #endif @@ -404,11 +402,9 @@ struct dc_debug_options { unsigned int force_odm_combine; //bit vector based on otg inst unsigned int force_fclk_khz; bool disable_tri_buf; -#ifdef CONFIG_DRM_AMD_DC_DMUB bool dmub_offload_enabled; bool dmcub_emulation; bool dmub_command_table; /* for testing only */ -#endif struct dc_bw_validation_profile bw_val_profile; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_fec; @@ -566,10 +562,9 @@ struct dc_init_data { struct dc_bios *vbios_override; enum dce_environment dce_environment; -#ifdef CONFIG_DRM_AMD_DC_DMUB struct dmub_offload_funcs *dmub_if; struct dc_reg_helper_state *dmub_offload; -#endif + struct dc_config flags; uint32_t log_mask; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 59b4bbefb7a4..8da4576e2908 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -32,7 +32,6 @@ #include "dm_services.h" #include -#ifdef CONFIG_DRM_AMD_DC_DMUB #include "dc.h" #include "dc_dmub_srv.h" @@ -100,7 +99,6 @@ static inline void submit_dmub_reg_wait( ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; } -#endif struct dc_reg_value_masks { uint32_t value; @@ -147,7 +145,6 @@ static void set_reg_field_values(struct dc_reg_value_masks 
*field_value_mask, } } -#ifdef CONFIG_DRM_AMD_DC_DMUB static void dmub_flush_buffer_execute( struct dc_reg_helper_state *offload, const struct dc_context *ctx) @@ -239,8 +236,6 @@ static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr, cmd_buf->reg_wait.time_out_us = time_out_us; } -#endif - uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, @@ -257,12 +252,10 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, va_end(ap); -#ifdef CONFIG_DRM_AMD_DC_DMUB if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) return dmub_reg_value_pack(ctx, addr, &field_value_mask); /* todo: return void so we can decouple code running in driver from register states */ -#endif /* mmio write directly */ reg_val = dm_read_reg(ctx, addr); @@ -289,13 +282,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx, /* mmio write directly */ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; -#ifdef CONFIG_DRM_AMD_DC_DMUB + if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) { return dmub_reg_value_burst_set_pack(ctx, addr, reg_val); /* todo: return void so we can decouple code running in driver from register states */ } -#endif + dm_write_reg(ctx, addr, reg_val); return reg_val; } @@ -313,14 +306,12 @@ uint32_t dm_read_reg_func( } #endif -#ifdef CONFIG_DRM_AMD_DC_DMUB if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress && !ctx->dmub_srv->reg_helper_offload.should_burst_write) { ASSERT(false); return 0; } -#endif value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); @@ -487,14 +478,12 @@ void generic_reg_wait(const struct dc_context *ctx, uint32_t reg_val; int i; -#ifdef CONFIG_DRM_AMD_DC_DMUB if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) { dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value, delay_between_poll_us * time_out_num_tries); return; } -#endif /* something is terribly wrong if time out is > 200ms. (5Hz) */ ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000); @@ -542,13 +531,12 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx, uint32_t index) { uint32_t value = 0; -#ifdef CONFIG_DRM_AMD_DC_DMUB + // when reg read, there should not be any offload. 
if (ctx->dmub_srv && ctx->dmub_srv->reg_helper_offload.gather_in_progress) { ASSERT(false); } -#endif dm_write_reg(ctx, addr_index, index); value = dm_read_reg(ctx, addr_data); @@ -587,7 +575,6 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, return reg_val; } -#ifdef CONFIG_DRM_AMD_DC_DMUB void reg_sequence_start_gather(const struct dc_context *ctx) { /* if reg sequence is supported and enabled, set flag to @@ -652,6 +639,3 @@ void reg_sequence_wait_done(const struct dc_context *ctx) dc_dmub_srv_wait_idle(ctx->dmub_srv); } } - - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index fb70ed9b351f..7ab7644458e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -48,9 +48,7 @@ struct dc_stream_state; struct dc_link; struct dc_sink; struct dal; -#ifdef CONFIG_DRM_AMD_DC_DMUB struct dc_dmub_srv; -#endif /******************************** * Environment definitions @@ -112,9 +110,8 @@ struct dc_context { uint32_t dc_sink_id_count; uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; -#ifdef CONFIG_DRM_AMD_DC_DMUB struct dc_dmub_srv *dmub_srv; -#endif + #ifdef CONFIG_DRM_AMD_DC_HDCP struct cp_psp cp_psp; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 41a0e53d2ba4..6f1a312c6a5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -352,9 +352,8 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, uint32_t i; struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); -#ifdef CONFIG_DRM_AMD_DC_DMUB REG_SEQ_START(); -#endif + for (i = 0 ; i < num; i++) { REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); @@ -633,10 +632,9 @@ void dpp1_set_degamma( BREAK_TO_DEBUGGER(); break; } -#ifdef CONFIG_DRM_AMD_DC_DMUB + REG_SEQ_SUBMIT(); REG_SEQ_WAIT_DONE(); -#endif } void dpp1_degamma_ram_select( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 8710f3ac2abf..30c025918568 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -457,18 +457,15 @@ static bool optc1_enable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 1); -#ifdef CONFIG_DRM_AMD_DC_DMUB REG_SEQ_START(); -#endif + /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 3, OTG_MASTER_EN, 1); -#ifdef CONFIG_DRM_AMD_DC_DMUB REG_SEQ_SUBMIT(); REG_SEQ_WAIT_DONE(); -#endif return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 2417d933ef2b..f90031ed58a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -345,10 +345,8 @@ static void mpc20_program_ogam_pwl( uint32_t i; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); -#ifdef CONFIG_DRM_AMD_DC_DMUB PERF_TRACE(); REG_SEQ_START(); -#endif for (i = 0 ; i < num; i++) { REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); @@ -468,12 +466,11 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } -#ifdef CONFIG_DRM_AMD_DC_DMUB + REG_SEQ_SUBMIT(); PERF_TRACE(); REG_SEQ_WAIT_DONE(); PERF_TRACE(); -#endif } static void mpc2_init_mpcc(struct mpcc *mpcc, int 
mpcc_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index eb7ce4a5cbac..6fcfc0a643c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1678,9 +1678,7 @@ static bool construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; -#ifdef CONFIG_DRM_AMD_DC_DMUB dc->caps.dmcub_support = true; -#endif if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 0a3891edfd94..968ff1fef486 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,11 +40,9 @@ #undef DEPRECATED -#ifdef CONFIG_DRM_AMD_DC_DMUB struct dmub_srv; struct dc_dmub_srv; -#endif irq_handler_idx dm_register_interrupt( struct dc_context *ctx, struct dc_interrupt_params *int_params, @@ -144,14 +142,12 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); -#ifdef CONFIG_DRM_AMD_DC_DMUB struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub); void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv); void reg_sequence_start_gather(const struct dc_context *ctx); void reg_sequence_start_execute(const struct dc_context *ctx); void reg_sequence_wait_done(const struct dc_context *ctx); -#endif #define FD(reg_field) reg_field ## __SHIFT, \ reg_field ## _MASK diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index a9a9657c095a..47e307388581 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -485,8 +485,6 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); - -#ifdef CONFIG_DRM_AMD_DC_DMUB /* register offload macros * * instead of MMIO to register directly, in some cases we want @@ -505,6 +503,5 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, /* wait for the last REG_SEQ_SUBMIT to finish */ #define REG_SEQ_WAIT_DONE() \ reg_sequence_wait_done(CTX) -#endif #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile index f3b844f474fd..e08dfeea24b0 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/Makefile +++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile @@ -20,10 +20,8 @@ # OTHER DEALINGS IN THE SOFTWARE. 
# -ifdef CONFIG_DRM_AMD_DC_DMUB DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB)) AMD_DISPLAY_FILES += $(AMD_DAL_DMUB) -endif -- cgit From 37f1b9f91904bd4af07b5787b3f8c93069607eb3 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Tue, 15 Oct 2019 08:35:14 -0400 Subject: drm/amd/display: 3.2.57 Signed-off-by: Aric Cyr Reviewed-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 30a2783881d9..d37818730960 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.56" +#define DC_VER "3.2.57" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From 47f365645a3b4a6f094fdfab080817676bfcb417 Mon Sep 17 00:00:00 2001 From: David Galiffi Date: Sat, 12 Oct 2019 16:18:32 -0400 Subject: drm/amd/display: Fix assert observed when performing dummy p-state check [WHY] V.Active dram clock change workaround need a small modification for DMLv2 to ensure that the dummy p-state check doesn't fail. Signed-off-by: David Galiffi Reviewed-by: Jun Lei Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 4 ++++ drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 1 + 3 files changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 3c70dd577292..d63ca4ccf7cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -2611,9 +2611,13 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; + if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; + } else if (mode_lib->vba.DummyPStateCheck && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 7f9a5621922f..81db8517a690 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -222,6 +222,8 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.SRExitTime = soc->sr_exit_time_us; mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us; mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us; + mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us; + mode_lib->vba.Downspreading = soc->downspread_percent; mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new! 
mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new! diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 1540ffbe3979..6c59a332093a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -155,6 +155,7 @@ struct vba_vars_st { double UrgentLatencySupportUsChroma; unsigned int DSCFormatFactor; + bool DummyPStateCheck; bool PrefetchModeSupported; enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only double XFCRemoteSurfaceFlipDelay; -- cgit From 1cad8ff7ecc6b70a062b8e8b74a0cd08c928341d Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 11 Oct 2019 15:34:20 -0400 Subject: drm/amd/display: Renoir chroma viewport WA [Why] For unknown reason, immediate flip with host VM translation on NV12 surface will underflow on last row of PTE. [How] Hack chroma viewport height to make fetch one more row of PTE. Note that this will cause hubp underflow on all video underlay cases, but the underflow is not user visible since it is in blank region. Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 + drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 65 +++++++++++++++++++++- .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1 + 3 files changed, 67 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index d37818730960..5d47871ff19c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -421,6 +421,8 @@ struct dc_debug_options { bool cm_in_bypass; #endif int force_clock_mode;/*every mode change.*/ + + bool nv12_iflip_vm_wa; }; struct dc_debug_data { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 2f5a5867e674..1ddd6ae22155 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -164,6 +164,69 @@ static void hubp21_setup( } +void hubp21_set_viewport( + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + int patched_viewport_height = 0; + struct dc_debug_options *debug = &hubp->ctx->dc->debug; + + REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0, + PRI_VIEWPORT_WIDTH, viewport->width, + PRI_VIEWPORT_HEIGHT, viewport->height); + + REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0, + PRI_VIEWPORT_X_START, viewport->x, + PRI_VIEWPORT_Y_START, viewport->y); + + /*for stereo*/ + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0, + SEC_VIEWPORT_WIDTH, viewport->width, + SEC_VIEWPORT_HEIGHT, viewport->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0, + SEC_VIEWPORT_X_START, viewport->x, + SEC_VIEWPORT_Y_START, viewport->y); + + /* + * Work around for underflow issue with NV12 + rIOMMU translation + * + immediate flip. 
This will cause hubp underflow, but will not + * be user visible since underflow is in blank region + */ + patched_viewport_height = viewport_c->height; + if (viewport_c->height != 0 && debug->nv12_iflip_vm_wa) { + int pte_row_height = 0; + int pte_rows = 0; + + REG_GET(DCHUBP_REQ_SIZE_CONFIG, + PTE_ROW_HEIGHT_LINEAR, &pte_row_height); + + pte_row_height = 1 << (pte_row_height + 3); + pte_rows = (viewport_c->height + pte_row_height - 1) / pte_row_height; + patched_viewport_height = pte_rows * pte_row_height + 3; + } + + + /* DC supports NV12 only at the moment */ + REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0, + PRI_VIEWPORT_WIDTH_C, viewport_c->width, + PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); + + REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, + PRI_VIEWPORT_X_START_C, viewport_c->x, + PRI_VIEWPORT_Y_START_C, viewport_c->y); + + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, + SEC_VIEWPORT_WIDTH_C, viewport_c->width, + SEC_VIEWPORT_HEIGHT_C, patched_viewport_height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, + SEC_VIEWPORT_X_START_C, viewport_c->x, + SEC_VIEWPORT_Y_START_C, viewport_c->y); +} + void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, struct vm_system_aperture_param *apt) { @@ -211,7 +274,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings, .set_blank = hubp1_set_blank, .dcc_control = hubp1_dcc_control, - .mem_program_viewport = min_set_viewport, + .mem_program_viewport = hubp21_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, .set_cursor_position = hubp1_cursor_set_position, .hubp_clk_cntl = hubp1_clk_cntl, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 6fcfc0a643c0..44dc1d15c334 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -831,6 +831,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = true, .disable_48mhz_pwrdwn = false, + .nv12_iflip_vm_wa = true }; static const struct dc_debug_options debug_defaults_diags = { -- cgit From 4338ffa8df23d6665f53dd38da2e415940e3bf13 Mon Sep 17 00:00:00 2001 From: Sung Lee Date: Wed, 16 Oct 2019 10:24:01 -0400 Subject: drm/amd/display: Use SIGNAL_TYPE_NONE in disable_output unless eDP [WHY] A change was recently made so that disable_output is called with the actual signal_type. Using the actual signal_type when calling disable_output in power_down_encoders would make a DP-to-HDMI dongle not light up on boot, as it would have signal_type SIGNAL_TYPE_DISPLAY_PORT. [HOW] Set signal_type to SIGNAL_TYPE_NONE unless it is eDP.
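Sketched below is the behaviour the [HOW] describes; it is illustrative only and mirrors the dce110_hw_sequencer.c hunk that follows. The loop bounds and the connector_signal field are assumptions made for clarity, not taken verbatim from the driver:

	/* Illustrative sketch, not the actual patch hunk (which follows below). */
	static void power_down_encoders_sketch(struct dc *dc)
	{
		int i;

		for (i = 0; i < dc->link_count; i++) {
			enum signal_type signal = dc->links[i]->connector_signal;

			/* Keep the real signal type only for eDP; everything else,
			 * including DP feeding a DP-to-HDMI dongle, is disabled with
			 * SIGNAL_TYPE_NONE so the dongle can light up on boot. */
			if (signal != SIGNAL_TYPE_EDP)
				signal = SIGNAL_TYPE_NONE;

			dc->links[i]->link_enc->funcs->disable_output(
					dc->links[i]->link_enc, signal);
		}
	}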
Signed-off-by: Sung Lee Reviewed-by: Yongqiang Sun Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index f0e837d14000..37f5bbcba155 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1438,6 +1438,9 @@ static void power_down_encoders(struct dc *dc) if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(dc->links[i], false); + if (signal != SIGNAL_TYPE_EDP) + signal = SIGNAL_TYPE_NONE; + dc->links[i]->link_enc->funcs->disable_output( dc->links[i]->link_enc, signal); } -- cgit From a5132f9728dc7c310736b83422e93e0ad563a3b6 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Wed, 16 Oct 2019 14:34:15 -0400 Subject: drm/amd/display: Add a sanity check for DSC already enabled/disabled [why] If acquire/release DSC resource sequence is affected by a regression, it can happen that the already-in-use DSC HW block is being wrongly re-used for a different pipe. The reverse is also possible, i.e. already-disabled DSC HW block could be disabled from other context. [how] Read back the enable state of DSC HW and report an error if duplicate enable or disable was attempted. Signed-off-by: Nikola Cornij Reviewed-by: Dmytro Laktyushkin Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 25 +++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 63eb377ed9c0..dc9944427d2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -222,9 +222,18 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) { struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; - /* TODO Check if DSC alreay in use? 
*/ - DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe); + DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } REG_UPDATE(DSC_TOP_CONTROL, DSC_CLOCK_EN, 1); @@ -238,8 +247,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) static void dsc2_disable(struct display_stream_compressor *dsc) { struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; - DC_LOG_DSC("disable DSC"); + DC_LOG_DSC("disable DSC %d", dsc->inst); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if (!dsc_clock_en || !dsc_fw_config) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, 0); -- cgit From 5ed78cd69a1bf64e6bdb70a05c1c9bde87b209ad Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Wed, 16 Oct 2019 23:44:55 -0400 Subject: drm/amd/display: set MSA MISC1 bit 6 while sending colorimetry in VSC SDP [Why] It is confusing to sinks if we send VSC SDP only on some format. Today we signal colorimetry format using MSA while in formats like sRGB. But when we switch to BT2020 we set the bit to ignore MSA colorimetry and instead use the colorimetry information in the VSC SDP. But if sink supports signaling of colorimetry via VSC SDP we should always set the MSA MISC1 bit 6, instead of doing so selectively. [How] If sink supports signaling of colorimetry via VSC SDP, and we are sending the colorimetry info via VSC SDP with packet revision 05h, then always set MSA MISC1 bit 6. 
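In MSA terms the change reduces to setting or clearing bit 6 of the MISC1 byte (mask 0x40) from the new use_vsc_sdp_for_colorimetry flag. A rough standalone sketch of that policy; the mask name is illustrative and the real code programs the bit through the stream encoder:

    #include <stdint.h>
    #include <stdio.h>

    #define MISC1_USE_VSC_SDP_COLORIMETRY 0x40  /* MSA MISC1 bit 6 */

    static uint8_t apply_vsc_sdp_policy(uint8_t misc1, int use_vsc_sdp_for_colorimetry)
    {
        if (use_vsc_sdp_for_colorimetry)
            return misc1 | MISC1_USE_VSC_SDP_COLORIMETRY;  /* sink ignores MSA colorimetry */
        return misc1 & ~MISC1_USE_VSC_SDP_COLORIMETRY;     /* sink uses MSA colorimetry */
    }

    int main(void)
    {
        printf("0x%02x\n", (unsigned)apply_vsc_sdp_policy(0x00, 1)); /* 0x40 */
        printf("0x%02x\n", (unsigned)apply_vsc_sdp_policy(0x40, 0)); /* 0x00 */
        return 0;
    }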
Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 +- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 1 + drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + .../drm/amd/display/dc/dce/dce_stream_encoder.c | 1 + .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 6 +-- .../amd/display/dc/dcn10/dcn10_stream_encoder.h | 1 + .../amd/display/dc/dcn20/dcn20_stream_encoder.c | 7 +++- .../amd/display/dc/dcn20/dcn20_stream_encoder.h | 1 + .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 1 + .../display/dc/virtual/virtual_stream_encoder.c | 1 + .../drm/amd/display/modules/inc/mod_info_packet.h | 4 +- .../amd/display/modules/info_packet/info_packet.c | 46 +++++++++++++++++----- 12 files changed, 57 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8e68fec5cc33..f2c9a5e5c6cc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4022,7 +4022,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct dmcu *dmcu = core_dc->res_pool->dmcu; stream->psr_version = dmcu->dmcu_version.psr_version; - mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); + mod_build_vsc_infopacket(stream, + &stream->vsc_infopacket, + &stream->use_vsc_sdp_for_colorimetry); } } finish: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 12ba6fdf89b7..b58916212187 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2946,6 +2946,7 @@ void core_link_enable_stream( pipe_ctx->stream_res.stream_enc, &stream->timing, stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index fdb6adc37857..f8c07d5a4054 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -164,6 +164,7 @@ struct dc_stream_state { enum view_3d_format view_format; + bool use_vsc_sdp_for_colorimetry; bool ignore_msa_timing_param; bool converter_disable_audio; uint8_t qs_bit; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 6ed922a3c1cd..2baaac1e5156 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -275,6 +275,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 06e5bbb4545c..376c4264d295 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -247,6 +247,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool 
use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { uint32_t h_active_start; @@ -312,10 +313,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). */ - if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) || - (output_color_space == COLOR_SPACE_2020_YCBCR) || - (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) || - (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) + if (use_vsc_sdp_for_colorimetry) misc1 = misc1 | 0x40; else misc1 = misc1 & ~0x40; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index c9cbc21d121e..2f00f2389e40 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -526,6 +526,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void enc1_stream_encoder_hdmi_set_stream_attribute( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 4b3401616434..6c2d82ce43ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -533,11 +533,16 @@ void enc2_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting); + enc1_stream_encoder_dp_set_stream_attribute(enc, + crtc_timing, + output_color_space, + use_vsc_sdp_for_colorimetry, + enable_sdp_splitting); REG_UPDATE(DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, enable_sdp_splitting); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index 3f94a9f13c4a..d2a805bd4573 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -98,6 +98,7 @@ void enc2_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void enc2_stream_encoder_dp_unblank( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 6305e388612a..c0b93d51ca8d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -126,6 +126,7 @@ struct stream_encoder_funcs { struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void (*hdmi_set_stream_attribute)( diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index ff664bdb1482..d1ce75212d9f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -32,6 +32,7 @@ static void virtual_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) {} static void virtual_stream_encoder_hdmi_set_stream_attribute( diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index ca8ce3c55337..42cbeffac640 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -26,6 +26,7 @@ #ifndef MOD_INFO_PACKET_H_ #define MOD_INFO_PACKET_H_ +#include "dm_services.h" #include "mod_shared.h" //Forward Declarations struct dc_stream_state; @@ -33,7 +34,8 @@ struct dc_info_packet; struct mod_vrr_params; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet); + struct dc_info_packet *info_packet, + bool *use_vsc_sdp_for_colorimetry); void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue); diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index db6b08f6d093..6a8a056424b8 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -30,6 +30,20 @@ #include "mod_freesync.h" #include "dc.h" +enum vsc_packet_revision { + vsc_packet_undefined = 0, + //01h = VSC SDP supports only 3D stereo. + vsc_packet_rev1 = 1, + //02h = 3D stereo + PSR. + vsc_packet_rev2 = 2, + //03h = 3D stereo + PSR2. + vsc_packet_rev3 = 3, + //04h = 3D stereo + PSR/PSR2 + Y-coordinate. + vsc_packet_rev4 = 4, + //05h = 3D stereo + PSR/PSR2 + Y-coordinate + Pixel Encoding/Colorimetry Format + vsc_packet_rev5 = 5, +}; + #define HDMI_INFOFRAME_TYPE_VENDOR 0x81 #define HF_VSIF_VERSION 1 @@ -116,35 +130,41 @@ enum ColorimetryYCCDP { }; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet) + struct dc_info_packet *info_packet, + bool *use_vsc_sdp_for_colorimetry) { - unsigned int vscPacketRevision = 0; + unsigned int vsc_packet_revision = vsc_packet_undefined; unsigned int i; unsigned int pixelEncoding = 0; unsigned int colorimetryFormat = 0; bool stereo3dSupport = false; + /* Initialize first, later if infopacket is valid determine if VSC SDP + * should be used to signal colorimetry format and pixel encoding. 
+ */ + *use_vsc_sdp_for_colorimetry = false; + if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) { - vscPacketRevision = 1; + vsc_packet_revision = vsc_packet_rev1; stereo3dSupport = true; } /*VSC packet set to 2 when DP revision >= 1.2*/ if (stream->psr_version != 0) - vscPacketRevision = 2; + vsc_packet_revision = vsc_packet_rev2; /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */ if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - vscPacketRevision = 5; + vsc_packet_revision = vsc_packet_rev5; /* VSC packet not needed based on the features * supported by this DP display */ - if (vscPacketRevision == 0) + if (vsc_packet_revision == vsc_packet_undefined) return; - if (vscPacketRevision == 0x2) { + if (vsc_packet_revision == vsc_packet_rev2) { /* Secondary-data Packet ID = 0*/ info_packet->hb0 = 0x00; /* 07h - Packet Type Value indicating Video @@ -166,7 +186,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; } - if (vscPacketRevision == 0x1) { + if (vsc_packet_revision == vsc_packet_rev1) { info_packet->hb0 = 0x00; // Secondary-data Packet ID = 0 info_packet->hb1 = 0x07; // 07h = Packet Type Value indicating Video Stream Configuration packet @@ -237,7 +257,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, * the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").) */ - if (vscPacketRevision == 0x5) { + if (vsc_packet_revision == vsc_packet_rev5) { /* Secondary-data Packet ID = 0 */ info_packet->hb0 = 0x00; /* 07h - Packet Type Value indicating Video Stream Configuration packet */ @@ -249,6 +269,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; + /* If we are using VSC SDP revision 05h, use this to signal for + * colorimetry format and pixel encoding. HW should later be + * programmed to set MSA MISC1 bit 6 to indicate ignore + * colorimetry format and pixel encoding in the MSA. + */ + *use_vsc_sdp_for_colorimetry = true; + /* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs * Data Bytes DB 18~16 * Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding) @@ -393,7 +420,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, */ info_packet->sb[18] = 0; } - } /** -- cgit From 5622b2d68d0a6e2fd960f2129704dc3c561608b2 Mon Sep 17 00:00:00 2001 From: David Galiffi Date: Tue, 1 Oct 2019 18:29:56 -0400 Subject: drm/amd/display: Create debug option to disable v.active clock change policy. [WHY] It has been a useful option in debugging GFXOFF and P.State Change issues. May be required as for platform specific workaround. [HOW] Create option in enum dc_debug_options, "disable_vactive_clock_change". When it is set, dm_dram_clock_change_vactive, will translate into p_state_change_support: false. 
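The effect of the option can be modeled in isolation: v.active DRAM clock change is only reported as supported when the debug option leaves it enabled (or the dummy p-state check applies) and the active margin clears the existing threshold of 50. A rough standalone sketch with simplified stand-in fields, not the real vba/soc structures:

    #include <stdbool.h>
    #include <stdio.h>

    struct soc_opts {
        bool disable_dram_clock_change_vactive_support; /* new debug option */
        bool dummy_pstate_check;
        double min_active_dram_clock_change_margin;
    };

    static bool vactive_clock_change_supported(const struct soc_opts *o)
    {
        bool vactive_allowed = !o->disable_dram_clock_change_vactive_support ||
                               o->dummy_pstate_check;

        return vactive_allowed && o->min_active_dram_clock_change_margin > 50;
    }

    int main(void)
    {
        struct soc_opts o = {
            .disable_dram_clock_change_vactive_support = true,
            .dummy_pstate_check = false,
            .min_active_dram_clock_change_margin = 80,
        };

        printf("%d\n", vactive_clock_change_supported(&o)); /* 0: option forces fallback */
        return 0;
    }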
Signed-off-by: David Galiffi Reviewed-by: Jun Lei Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 3 ++- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 6 +++--- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1 + drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 1 + 7 files changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 5d47871ff19c..cc45d77a3b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -423,6 +423,7 @@ struct dc_debug_options { int force_clock_mode;/*every mode change.*/ bool nv12_iflip_vm_wa; + bool disable_dram_clock_change_vactive_support; }; struct dc_debug_data { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index bbd1c98564be..e73a65606a4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2850,6 +2850,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool full_pstate_supported = false; bool dummy_pstate_supported = false; double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = dc->debug.disable_dram_clock_change_vactive_support; if (fast_validate) return dcn20_validate_bandwidth_internal(dc, context, true); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 6c6c486b774a..77b7574c63cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + if (mode_lib->vba.DRAMClockChangeSupportsVActive && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index d63ca4ccf7cf..62dfd36d830a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -2611,12 +2611,12 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + if (mode_lib->vba.DRAMClockChangeSupportsVActive && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else if (mode_lib->vba.DummyPStateCheck && - mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { 
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index cfacd6027467..19356180cbb6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -112,6 +112,7 @@ struct _vcs_dpi_soc_bounding_box_st { bool do_urgent_latency_adjustment; double urgent_latency_adjustment_fabric_clock_component_us; double urgent_latency_adjustment_fabric_clock_reference_mhz; + bool disable_dram_clock_change_vactive_support; }; struct _vcs_dpi_ip_params_st { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 81db8517a690..da5e9d2fd6b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -223,6 +223,8 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us; mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us; mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us; + mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support || + mode_lib->vba.DummyPStateCheck; mode_lib->vba.Downspreading = soc->downspread_percent; mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new! diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 6c59a332093a..6d8b5c61de68 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -156,6 +156,7 @@ struct vba_vars_st { unsigned int DSCFormatFactor; bool DummyPStateCheck; + bool DRAMClockChangeSupportsVActive; bool PrefetchModeSupported; enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only double XFCRemoteSurfaceFlipDelay; -- cgit From f2988e67144a263e33aa3b916457bf3095288c94 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Fri, 18 Oct 2019 18:24:59 -0400 Subject: drm/amd/display: optimize bandwidth after commit streams. [Why] System is unable to enter S0i3 due to DISPLAY_OFF_MASK not asserted in SMU. [How] Optimized bandwidth should be called paired and to resolve unplug display underflow issue, optimize bandwidth after commit streams is moved to next page flip, in case of S0i3, there is a change for no flip coming causing display count is 1 in SMU side. Add optimize bandwidth after commit stream. 
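The intent is easiest to see as a small model: unless a seamless boot is being preserved, the clock manager is told about the new display count at commit time instead of waiting for the next flip, so a display count of zero can reach the SMU before S0i3 entry. A rough standalone sketch with illustrative names, not the dc interfaces:

    #include <stdbool.h>
    #include <stdio.h>

    struct dc_model {
        bool optimize_seamless_boot;
        int  committed_stream_count;
    };

    static void optimize_bandwidth(const struct dc_model *dc)
    {
        printf("notify SMU/pplib: %d active display(s)\n", dc->committed_stream_count);
    }

    static void commit_state(struct dc_model *dc, int new_stream_count)
    {
        dc->committed_stream_count = new_stream_count;
        if (!dc->optimize_seamless_boot)
            optimize_bandwidth(dc); /* paired call made at commit, not at next flip */
    }

    int main(void)
    {
        struct dc_model dc = { .optimize_seamless_boot = false };

        commit_state(&dc, 1);
        commit_state(&dc, 0); /* unplug: SMU now sees zero displays */
        return 0;
    }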
Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0a5f91ab4652..cbdd049ca76b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1246,6 +1246,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); + if (!dc->optimize_seamless_boot) + /* pplib is notified if disp_num changed */ + dc->hwss.optimize_bandwidth(dc, context); + for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; -- cgit From eb7d81af3c209b5135ec4402adf4e0cd5482b338 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 21 Oct 2019 08:16:22 -0400 Subject: drm/amd/display: 3.2.58 Signed-off-by: Aric Cyr Reviewed-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index cc45d77a3b0d..f12ad4b17781 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.57" +#define DC_VER "3.2.58" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From ae8cf9774e3a609066b277c3742c3551564fb079 Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Fri, 6 Sep 2019 09:49:19 -0400 Subject: drm/amd/display: Add some hardware status in DTN log debugfs [Why] For debug purpose, we need to check the following hardware status in DTN log debugfs: 1.dpp & hubp clock enable; 2.crtc blank enable; 3.link phy status; [How] Add the upper information in the amdgpu_dm_dtn_log debugfs. For CRTC blanked status, since DCN2 and greater reports it on the OPP instead of OTG, we patch it in after calling optc1_read_otg_states. Ideally, this should be done in the DCN version specific function hooks. It has been left as a TODO item. 
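The blanked-status handling added here amounts to: prefer the OPP's dpg_is_blanked() hook when the ASIC provides one, otherwise fall back to the OTG's is_blanked(). A rough standalone sketch with function pointers standing in for the opp/tg funcs tables:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct opp_model { bool (*dpg_is_blanked)(void); };
    struct otg_model { bool (*is_blanked)(void); };

    static bool read_blank_status(const struct opp_model *opp, const struct otg_model *otg)
    {
        if (opp->dpg_is_blanked)        /* DCN2+ reports blank on the OPP */
            return opp->dpg_is_blanked();
        return otg->is_blanked();       /* older DCN: ask the OTG */
    }

    static bool otg_blanked(void) { return false; }

    int main(void)
    {
        struct opp_model opp = { .dpg_is_blanked = NULL }; /* e.g. a DCN1 pool */
        struct otg_model otg = { .is_blanked = otg_blanked };

        printf("blank_en = %d\n", read_blank_status(&opp, &otg));
        return 0;
    }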
Signed-off-by: Leo (Hanghong) Ma Reviewed-by: Mikita Lipski Acked-by: Bhawanpreet Lakha Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 ++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 1 + .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 43 +++++++++++++++------- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 1 + drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 1 + .../drm/amd/display/dc/dcn20/dcn20_link_encoder.c | 1 + .../gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 1 + 7 files changed, 37 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 14d1be6c66e6..5aeee938605a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1014,6 +1014,9 @@ void hubp1_read_state_common(struct hubp *hubp) HUBP_TTU_DISABLE, &s->ttu_disable, HUBP_UNDERFLOW_STATUS, &s->underflow_status); + REG_GET(HUBP_CLK_CNTL, + HUBP_CLOCK_ENABLE, &s->clock_en); + REG_GET(DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, &s->min_ttu_vblank); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index ae70d9c0aa1d..e65e76f018e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -670,6 +670,7 @@ struct dcn_hubp_state { uint32_t sw_mode; uint32_t dcc_en; uint32_t blank_en; + uint32_t clock_en; uint32_t underflow_status; uint32_t ttu_disable; uint32_t min_ttu_vblank; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 32844cd50d09..0a30d97b2656 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -129,9 +129,8 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) struct resource_pool *pool = dc->res_pool; int i; - DTN_INFO("HUBP: format addr_hi width height" - " rot mir sw_mode dcc_en blank_en ttu_dis underflow" - " min_ttu_vblank qos_low_wm qos_high_wm\n"); + DTN_INFO( + "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n"); for (i = 0; i < pool->pipe_count; i++) { struct hubp *hubp = pool->hubps[i]; struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); @@ -139,8 +138,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) hubp->funcs->hubp_read_state(hubp); if (!s->blank_en) { - DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" - " %6d %8d %7d %8xh", + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh", hubp->inst, s->pixel_format, s->inuse_addr_hi, @@ -151,6 +149,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) s->sw_mode, s->dcc_en, s->blank_en, + s->clock_en, s->ttu_disable, s->underflow_status); DTN_INFO_MICRO_SEC(s->min_ttu_vblank); @@ -308,21 +307,35 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); - DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel" - " h_bs h_be h_ss h_se hpol htot vtot underflow\n"); + DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n"); for (i = 0; i < pool->timing_generator_count; i++) { struct timing_generator *tg = pool->timing_generators[i]; struct dcn_otg_state s = {0}; - + /* Read 
shared OTG state registers for all DCNx */ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); +#ifdef CONFIG_DRM_AMD_DC_DCN2_0 + /* + * For DCN2 and greater, a register on the OPP is used to + * determine if the CRTC is blanked instead of the OTG. So use + * dpg_is_blanked() if exists, otherwise fallback on otg. + * + * TODO: Implement DCN-specific read_otg_state hooks. + */ + if (pool->opps[i]->funcs->dpg_is_blanked) + s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]); + else + s.blank_enabled = tg->funcs->is_blanked(tg); +#else + s.blank_enabled = tg->funcs->is_blanked(tg); +#endif + //only print if OTG master is enabled if ((s.otg_enabled & 1) == 0) continue; - DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d" - " %5d %5d %5d %5d %9d\n", + DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n", tg->inst, s.v_blank_start, s.v_blank_end, @@ -340,7 +353,8 @@ void dcn10_log_hw_state(struct dc *dc, s.h_sync_a_pol, s.h_total, s.v_total, - s.underflow_occurred_status); + s.underflow_occurred_status, + s.blank_enabled); // Clear underflow for debug purposes // We want to keep underflow sticky bit on for the longevity tests outside of test environment. @@ -387,7 +401,7 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); - DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS\n"); + DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n"); for (i = 0; i < dc->link_count; i++) { struct link_encoder *lenc = dc->links[i]->link_enc; @@ -395,11 +409,12 @@ void dcn10_log_hw_state(struct dc *dc, if (lenc->funcs->read_state) { lenc->funcs->read_state(lenc, &s); - DTN_INFO("[%-3d]: %-12d %-22d %-22d\n", + DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n", i, s.dphy_fec_en, s.dphy_fec_ready_shadow, - s.dphy_fec_active_status); + s.dphy_fec_active_status, + s.dp_link_training_complete); DTN_INFO("\n"); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0a9ad692f541..33a3dcdb3fd1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -406,6 +406,7 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_pipe_clock_control = opp1_pipe_clock_control, #if defined(CONFIG_DRM_AMD_DC_DCN2_0) .opp_set_disp_pattern_generator = NULL, + .dpg_is_blanked = NULL, #endif .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index c8d795b335ba..4476bc8cdb4d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -542,6 +542,7 @@ struct dcn_otg_state { uint32_t h_total; uint32_t underflow_occurred_status; uint32_t otg_enabled; + uint32_t blank_enabled; }; void optc1_read_otg_state(struct optc *optc1, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index e476f27aa3a9..0e0306d84cd8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -203,6 +203,7 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s) REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en); REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow); REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status); + 
REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete); } #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index b21909216fb6..af57751ed8a1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -124,6 +124,7 @@ struct link_enc_state { uint32_t dphy_fec_en; uint32_t dphy_fec_ready_shadow; uint32_t dphy_fec_active_status; + uint32_t dp_link_training_complete; }; #endif -- cgit From d9a07577b8a3131c90c187fb2b89662bee535cfd Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Fri, 2 Aug 2019 17:22:57 -0400 Subject: drm/amd/display: add oem i2c implemenation in dc [why] Need it for some OEM I2C devices in Nv10 [how] Link up code to parse OEM table and expose DC interface to access the pins Signed-off-by: Jun Lei Reviewed-by: Aric Cyr Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 63 +++++++++++++--------- drivers/gpu/drm/amd/display/dc/core/dc.c | 11 ++++ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 5 +- drivers/gpu/drm/amd/display/dc/dc_link.h | 4 ++ drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c | 19 +++---- drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 43 --------------- drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h | 6 +-- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 15 ++++++ .../amd/display/dc/gpio/dcn20/hw_factory_dcn20.c | 12 +++++ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 + .../amd/display/include/grph_object_ctrl_defs.h | 3 +- 11 files changed, 100 insertions(+), 83 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 7873abea4112..05be2c0c25d1 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -294,11 +294,21 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb, struct atom_display_object_path_v2 *object; struct atom_common_record_header *header; struct atom_i2c_record *record; + struct atom_i2c_record dummy_record = {0}; struct bios_parser *bp = BP_FROM_DCB(dcb); if (!info) return BP_RESULT_BADINPUT; + if (id.type == OBJECT_TYPE_GENERIC) { + dummy_record.i2c_id = id.id; + + if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK) + return BP_RESULT_OK; + else + return BP_RESULT_NORECORD; + } + object = get_bios_object(bp, id); if (!object) @@ -341,6 +351,7 @@ static enum bp_result get_gpio_i2c_info( struct atom_gpio_pin_lut_v2_1 *header; uint32_t count = 0; unsigned int table_index = 0; + bool find_valid = false; if (!info) return BP_RESULT_BADINPUT; @@ -368,33 +379,28 @@ static enum bp_result get_gpio_i2c_info( - sizeof(struct atom_common_table_header)) / sizeof(struct atom_gpio_pin_assignment); - table_index = record->i2c_id & I2C_HW_LANE_MUX; - - if (count < table_index) { - bool find_valid = false; - - for (table_index = 0; table_index < count; table_index++) { - if (((record->i2c_id & I2C_HW_CAP) == ( - header->gpio_pin[table_index].gpio_id & - I2C_HW_CAP)) && - ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_ENGINE_ID_MASK)) && - ((record->i2c_id & I2C_HW_LANE_MUX) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_LANE_MUX))) { - /* still valid */ - find_valid = true; - break; - } + for (table_index = 0; table_index < count; table_index++) { + if 
(((record->i2c_id & I2C_HW_CAP) == ( + header->gpio_pin[table_index].gpio_id & + I2C_HW_CAP)) && + ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == + (header->gpio_pin[table_index].gpio_id & + I2C_HW_ENGINE_ID_MASK)) && + ((record->i2c_id & I2C_HW_LANE_MUX) == + (header->gpio_pin[table_index].gpio_id & + I2C_HW_LANE_MUX))) { + /* still valid */ + find_valid = true; + break; } - /* If we don't find the entry that we are looking for then - * we will return BP_Result_BadBiosTable. - */ - if (find_valid == false) - return BP_RESULT_BADBIOSTABLE; } + /* If we don't find the entry that we are looking for then + * we will return BP_Result_BadBiosTable. + */ + if (find_valid == false) + return BP_RESULT_BADBIOSTABLE; + /* get the GPIO_I2C_INFO */ info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false; info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX; @@ -1205,6 +1211,8 @@ static enum bp_result get_firmware_info_v3_1( bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; } + info->oem_i2c_present = false; + return BP_RESULT_OK; } @@ -1283,6 +1291,13 @@ static enum bp_result get_firmware_info_v3_2( bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; } + if (firmware_info->board_i2c_feature_id == 0x2) { + info->oem_i2c_present = true; + info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id; + } else { + info->oem_i2c_present = false; + } + return BP_RESULT_OK; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index cbdd049ca76b..aadd2a20fc65 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2502,6 +2502,17 @@ bool dc_submit_i2c( cmd); } +bool dc_submit_i2c_oem( + struct dc *dc, + struct i2c_command *cmd) +{ + struct ddc_service *ddc = dc->res_pool->oem_device; + return dce_i2c_submit_command( + dc->res_pool, + ddc->ddc_pin, + cmd); +} + static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) { if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 7f904d55c1bc..a2e6adacaff6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -206,7 +206,10 @@ static void construct( ddc_service->ddc_pin = NULL; } else { hw_info.ddc_channel = i2c_info.i2c_line; - hw_info.hw_supported = i2c_info.i2c_hw_assist; + if (ddc_service->link != NULL) + hw_info.hw_supported = i2c_info.i2c_hw_assist; + else + hw_info.hw_supported = false; ddc_service->ddc_pin = dal_gpio_create_ddc( gpio_service, diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index f24fd19ed93d..9270e43cd5bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -305,6 +305,10 @@ bool dc_submit_i2c( uint32_t link_index, struct i2c_command *cmd); +bool dc_submit_i2c_oem( + struct dc *dc, + struct i2c_command *cmd); + uint32_t dc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c index 35a75398fcb4..dd41736bb5c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c @@ -31,7 +31,7 @@ bool dce_i2c_submit_command( struct i2c_command *cmd) { struct dce_i2c_hw *dce_i2c_hw; - struct dce_i2c_sw *dce_i2c_sw; + struct dce_i2c_sw 
dce_i2c_sw = {0}; if (!ddc) { BREAK_TO_DEBUGGER(); @@ -43,18 +43,15 @@ bool dce_i2c_submit_command( return false; } - /* The software engine is only available on dce8 */ - dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc); - - if (!dce_i2c_sw) { - dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); - - if (!dce_i2c_hw) - return false; + dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); + if (dce_i2c_hw) return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw); - } - return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw); + dce_i2c_sw.ctx = ddc->ctx; + if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) { + return dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw); + } + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index a5a11c251e25..87d8428df6c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -73,31 +73,6 @@ static void release_engine_dce_sw( dce_i2c_sw->ddc = NULL; } -static bool get_hw_supported_ddc_line( - struct ddc *ddc, - enum gpio_ddc_line *line) -{ - enum gpio_ddc_line line_found; - - *line = GPIO_DDC_LINE_UNKNOWN; - - if (!ddc) { - BREAK_TO_DEBUGGER(); - return false; - } - - if (!ddc->hw_info.hw_supported) - return false; - - line_found = dal_ddc_get_line(ddc); - - if (line_found >= GPIO_DDC_LINE_COUNT) - return false; - - *line = line_found; - - return true; -} static bool wait_for_scl_high_sw( struct dc_context *ctx, struct ddc *ddc, @@ -524,21 +499,3 @@ bool dce_i2c_submit_command_sw( return result; } -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( - struct resource_pool *pool, - struct ddc *ddc) -{ - enum gpio_ddc_line line; - struct dce_i2c_sw *engine = NULL; - - if (get_hw_supported_ddc_line(ddc, &line)) - engine = pool->sw_i2cs[line]; - - if (!engine) - return NULL; - - if (!dce_i2c_engine_acquire_sw(engine, ddc)) - return NULL; - - return engine; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h index 5bbcdd455614..019fc47bb767 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h @@ -49,9 +49,9 @@ bool dce_i2c_submit_command_sw( struct i2c_command *cmd, struct dce_i2c_sw *dce_i2c_sw); -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( - struct resource_pool *pool, - struct ddc *ddc); +bool dce_i2c_engine_acquire_sw( + struct dce_i2c_sw *dce_i2c_sw, + struct ddc *ddc_handle); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index e73a65606a4a..d3d26796a9b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -59,6 +59,7 @@ #include "dml/display_mode_vba.h" #include "dcn20_dccg.h" #include "dcn20_vmid.h" +#include "dc_link_ddc.h" #include "navi10_ip_offset.h" @@ -1346,6 +1347,8 @@ static void destruct(struct dcn20_resource_pool *pool) if (pool->base.pp_smu != NULL) dcn20_pp_smu_destroy(&pool->base.pp_smu); + if (pool->base.oem_device != NULL) + dal_ddc_service_destroy(&pool->base.oem_device); } struct hubp *dcn20_hubp_create( @@ -3391,6 +3394,7 @@ static bool construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data; struct _vcs_dpi_soc_bounding_box_st *loaded_bb = get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev); struct _vcs_dpi_ip_params_st *loaded_ip = 
@@ -3686,6 +3690,17 @@ static bool construct( dc->cap_funcs = cap_funcs; + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + } else { + pool->base.oem_device = NULL; + } + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 43a440385b43..2664cb22dfe7 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -110,6 +110,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = { ddc_data_regs_dcn2(4), ddc_data_regs_dcn2(5), ddc_data_regs_dcn2(6), + { + DDC_GPIO_VGA_REG_LIST(DATA), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } }; static const struct ddc_registers ddc_clk_regs_dcn[] = { @@ -119,6 +125,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = { ddc_clk_regs_dcn2(4), ddc_clk_regs_dcn2(5), ddc_clk_regs_dcn2(6), + { + DDC_GPIO_VGA_REG_LIST(CLK), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } }; static const struct ddc_sh_mask ddc_shift[] = { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index a831079607cd..fc9decc0a8fc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -229,6 +229,8 @@ struct resource_pool { const struct resource_funcs *funcs; const struct resource_caps *res_cap; + + struct ddc_service *oem_device; }; struct dcn_fe_bandwidth { diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index f312834fef50..d51de94e4bc3 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -178,7 +178,8 @@ struct dc_firmware_info { uint32_t default_engine_clk; /* in KHz */ uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */ uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */ - + bool oem_i2c_present; + uint8_t oem_i2c_obj_id; }; -- cgit From 00ada9d14f386305abfad73665d235882768c78f Mon Sep 17 00:00:00 2001 From: Reza Amini Date: Thu, 17 Oct 2019 16:40:02 -0400 Subject: drm/amd/display: Unify all scaling when Integer Scaling enabled [why] We want to guarantee integer ratio scaling for all scaling modes. [how] Treat centered, fullscreen, preserve aspect ratio the same: scale the view as many times as possible, and fill in the rest with a black border. 
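As a worked example of the unified rule: replicate the source the largest whole number of times that still fits the addressable timing, center the result, and leave the remainder as black border. A rough standalone sketch with plain ints standing in for the src/dst rects:

    #include <stdio.h>

    struct rect_model { int x, y, width, height; };

    static struct rect_model integer_scale(struct rect_model src,
                                           int h_addressable, int v_addressable)
    {
        int mult_w = h_addressable / src.width;
        int mult_h = v_addressable / src.height;
        int mult = mult_w < mult_h ? mult_w : mult_h; /* largest replication that fits */
        struct rect_model dst;

        dst.width  = src.width * mult;
        dst.height = src.height * mult;
        dst.x = (h_addressable - dst.width) / 2;  /* center; the rest is black border */
        dst.y = (v_addressable - dst.height) / 2;
        return dst;
    }

    int main(void)
    {
        struct rect_model src = { 0, 0, 1280, 1024 };
        struct rect_model dst = integer_scale(src, 3840, 2160);

        /* 1280x1024 on a 3840x2160 timing replicates 2x: 2560x2048 at (640,56) */
        printf("%dx%d at (%d,%d)\n", dst.width, dst.height, dst.x, dst.y);
        return 0;
    }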
Signed-off-by: Reza Amini Reviewed-by: Aric Cyr Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 8 ++++++-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 21 ++++----------------- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + 3 files changed, 11 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index aadd2a20fc65..0cef26941b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1552,7 +1552,10 @@ static enum surface_update_type get_scaling_info_update_type( if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || u->scaling_info->clip_rect.height != u->surface->clip_rect.height || u->scaling_info->dst_rect.width != u->surface->dst_rect.width - || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) { + || u->scaling_info->dst_rect.height != u->surface->dst_rect.height + || u->scaling_info->scaling_quality.integer_scaling != + u->surface->scaling_quality.integer_scaling + ) { update_flags->bits.scaling_change = 1; if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width @@ -1673,7 +1676,8 @@ static enum surface_update_type check_update_surfaces_for_stream( union stream_update_flags *su_flags = &stream_update->stream->update_flags; if ((stream_update->src.height != 0 && stream_update->src.width != 0) || - (stream_update->dst.height != 0 && stream_update->dst.width != 0)) + (stream_update->dst.height != 0 && stream_update->dst.width != 0) || + stream_update->integer_scaling_update) su_flags->bits.scaling = 1; if (stream_update->out_transfer_func) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 37698305a2dc..30f5434fa8e9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -951,25 +951,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); } -static bool are_rects_integer_multiples(struct rect src, struct rect dest) -{ - if (dest.width >= src.width && dest.width % src.width == 0 && - dest.height >= src.height && dest.height % src.height == 0) - return true; - - return false; -} static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) { - if (!pipe_ctx->plane_state->scaling_quality.integer_scaling) - return; + unsigned int integer_multiple = 1; - //for Centered Mode - if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width && - pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) { + if (pipe_ctx->plane_state->scaling_quality.integer_scaling) { // calculate maximum # of replication of src onto addressable - unsigned int integer_multiple = min( + integer_multiple = min( pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); @@ -980,10 +969,8 @@ static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) //center dst onto addressable pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2; - } - //disable taps if src & dst are integer ratio - if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) { + //We are guaranteed that we are 
scaling in integer ratio pipe_ctx->plane_state->scaling_quality.v_taps = 1; pipe_ctx->plane_state->scaling_quality.h_taps = 1; pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index f8c07d5a4054..70274fc43a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -252,6 +252,7 @@ struct dc_stream_update { struct dc_info_packet *vsp_infopacket; bool *dpms_off; + bool integer_scaling_update; struct colorspace_transform *gamut_remap; enum dc_color_space *output_color_space; -- cgit From 8e5100a575433cc185a2e224280fbd873b6692dd Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Fri, 19 Jul 2019 10:25:39 -0400 Subject: drm/amd/display: initialize lttpr [Description] When reading link, update the procedure as follows: 1-Set aux timeout to extended: 3.2ms 2-Start with reading lttpr caps 3-Determine if lttpr support should be enabled. Reset aux timeout to 400us if no repeater is found. Signed-off-by: abdoulaye berthe Reviewed-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 56 ++++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 2 + drivers/gpu/drm/amd/display/dc/dc_link.h | 1 + drivers/gpu/drm/amd/display/dc/dc_types.h | 36 +++++++++++++++ 4 files changed, 95 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 0f59b68aa4c2..2a89f90ef7a7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -21,6 +21,9 @@ #define DC_LOGGER \ link->ctx->logger + +#define DP_REPEATER_CONFIGURATION_AND_STATUS_OFFSET 0x50 + /* maximum pre emphasis level allowed for each voltage swing level*/ static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, @@ -2753,6 +2756,14 @@ static bool retrieve_link_cap(struct dc_link *link) int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; + /* Set default timeout to 3.2ms and read LTTPR capabilities */ + bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support && + !link->dc->config.disable_extended_timeout_support; + if (ext_timeout_support) { + status = dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); + link->is_lttpr_mode_transparent = true; + } + memset(dpcd_data, '\0', sizeof(dpcd_data)); memset(&down_strm_port_count, '\0', sizeof(union down_stream_port_count)); @@ -2785,6 +2796,51 @@ static bool retrieve_link_cap(struct dc_link *link) return false; } + if (ext_timeout_support) { + status = core_link_read_dpcd( + link, + DP_PHY_REPEATER_CNT, + &link->dpcd_caps.lttpr_caps.phy_repeater_cnt, + sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt)); + + if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0) { + + link->is_lttpr_mode_transparent = false; + + status = core_link_read_dpcd( + link, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + (uint8_t *)&link->dpcd_caps.lttpr_caps.revision, + sizeof(link->dpcd_caps.lttpr_caps.revision)); + + status = core_link_read_dpcd( + link, + DP_MAX_LINK_RATE_PHY_REPEATER, + &link->dpcd_caps.lttpr_caps.max_link_rate, + sizeof(link->dpcd_caps.lttpr_caps.max_link_rate)); + + status = core_link_read_dpcd( + link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&link->dpcd_caps.lttpr_caps.mode, + sizeof(link->dpcd_caps.lttpr_caps.mode)); + + status = core_link_read_dpcd( + link, 
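The detection sequence can be sketched on its own: extend the AUX timeout first, read the repeater count, and drop back to the default 400us timeout when no LTTPR is present. A rough standalone model; the timeout constants mirror the patch, the DPCD read is faked, and none of this is the dc aux interface:

    #include <stdbool.h>
    #include <stdio.h>

    #define LINK_AUX_DEFAULT_TIMEOUT_PERIOD          400  /* us */
    #define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /* us */

    struct link_model {
        bool is_lttpr_mode_transparent;
        int  aux_timeout_us;
    };

    /* Stand-in for the DPCD read of DP_PHY_REPEATER_CNT. */
    static int read_phy_repeater_cnt(void) { return 0; }

    static void init_lttpr(struct link_model *link)
    {
        link->aux_timeout_us = LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD;
        link->is_lttpr_mode_transparent = true;

        if (read_phy_repeater_cnt() > 0)
            link->is_lttpr_mode_transparent = false; /* repeaters get trained explicitly */
        else
            link->aux_timeout_us = LINK_AUX_DEFAULT_TIMEOUT_PERIOD;
    }

    int main(void)
    {
        struct link_model link = { 0 };

        init_lttpr(&link);
        printf("transparent=%d timeout=%dus\n",
               link.is_lttpr_mode_transparent, link.aux_timeout_us);
        return 0;
    }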
+ DP_MAX_LANE_COUNT_PHY_REPEATER, + &link->dpcd_caps.lttpr_caps.max_lane_count, + sizeof(link->dpcd_caps.lttpr_caps.max_lane_count)); + + status = core_link_read_dpcd( + link, + DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, + &link->dpcd_caps.lttpr_caps.max_ext_timeout, + sizeof(link->dpcd_caps.lttpr_caps.max_ext_timeout)); + } else { + dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + } + } + { union training_aux_rd_interval aux_rd_interval; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index f12ad4b17781..8ff7556eb2c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -994,6 +994,8 @@ struct dpcd_caps { union dpcd_fec_capability fec_cap; struct dpcd_dsc_capabilities dsc_caps; #endif + struct dc_lttpr_caps lttpr_caps; + }; #include "dc_link.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 9270e43cd5bb..67ba6666a324 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -85,6 +85,7 @@ struct dc_link { bool link_state_valid; bool aux_access_disabled; bool sync_lt_in_progress; + bool is_lttpr_mode_transparent; /* caps is the same as reported_link_cap. link_traing use * reported_link_cap. Will clean up. TODO diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 7ab7644458e7..837859e65e45 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -122,6 +122,7 @@ struct dc_context { #define DC_EDID_BLOCK_SIZE 128 #define MAX_SURFACE_NUM 4 #define NUM_PIXEL_FORMATS 10 +#define MAX_REPEATER_CNT 8 #include "dc_ddc_types.h" @@ -405,6 +406,41 @@ enum dpcd_downstream_port_max_bpc { DOWN_STREAM_MAX_12BPC, DOWN_STREAM_MAX_16BPC }; + + +enum link_training_offset { + DPRX = 0, + LTTPR_PHY_REPEATER1 = 1, + LTTPR_PHY_REPEATER2 = 2, + LTTPR_PHY_REPEATER3 = 3, + LTTPR_PHY_REPEATER4 = 4, + LTTPR_PHY_REPEATER5 = 5, + LTTPR_PHY_REPEATER6 = 6, + LTTPR_PHY_REPEATER7 = 7, + LTTPR_PHY_REPEATER8 = 8 +}; + +enum lttpr_mode { + phy_repeater_mode_transparent = 0x55, + phy_repeater_mode_non_transparent = 0xAA +}; + +enum lttpr_rev { + lttpr_rev_unknown = 0x0, + lttpr_rev_14 = 0x14, + lttpr_rev_max = 0x20 +}; + +struct dc_lttpr_caps { + enum lttpr_rev revision; + enum lttpr_mode mode; + uint8_t max_lane_count; + uint8_t max_link_rate; + uint8_t phy_repeater_cnt; + uint8_t max_ext_timeout; + uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; +}; + struct dc_dongle_caps { /* dongle type (DP converter, CV smart dongle) */ enum display_dongle_type dongle_type; -- cgit From 903e859b72957985c60de593f364e33639964829 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Wed, 18 Sep 2019 11:57:47 -0400 Subject: drm/amd/display: check for dp rev before reading lttpr regs [Why] LTTPR was introduced after DP1.2. Reading LTTPR registers 0xFXXXX on some DP 1.2 display is causing an unexpected behavior. [How] Make sure that we don't read any lttpr registers on 1.2 displays. 
Signed-off-by: abdoulaye berthe Reviewed-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 2a89f90ef7a7..1e4480f3bd3c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2759,9 +2759,10 @@ static bool retrieve_link_cap(struct dc_link *link) /* Set default timeout to 3.2ms and read LTTPR capabilities */ bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support && !link->dc->config.disable_extended_timeout_support; + link->is_lttpr_mode_transparent = true; + if (ext_timeout_support) { status = dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); - link->is_lttpr_mode_transparent = true; } memset(dpcd_data, '\0', sizeof(dpcd_data)); @@ -2796,7 +2797,7 @@ static bool retrieve_link_cap(struct dc_link *link) return false; } - if (ext_timeout_support) { + if (ext_timeout_support && link->dpcd_caps.dpcd_rev.raw >= 0x14) { status = core_link_read_dpcd( link, DP_PHY_REPEATER_CNT, -- cgit From bad7ab0be9bea2a4128158751bc29ac4b1c3bce2 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Fri, 19 Jul 2019 10:43:42 -0400 Subject: drm/amd/display: configure lttpr mode [Description] 1-Grant extended timeout request. Done once after detection 2-Configure lttpr mode based on lttpr support before LT 3-Account for lttpr cap when determining max link settings Signed-off-by: abdoulaye berthe Reviewed-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 42 ++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 1e4480f3bd3c..94d5a0ac308f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1057,6 +1057,26 @@ static void initialize_training_settings( lt_settings->enhanced_framing = 1; } +static void configure_lttpr_mode(struct dc_link *link) +{ + /* aux timeout is already set to extended */ + /* RESET/SET lttpr mode to enable non transparent mode */ + enum lttpr_mode repeater_mode = phy_repeater_mode_transparent; + + core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + if (!link->is_lttpr_mode_transparent) { + repeater_mode = phy_repeater_mode_non_transparent; + core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + } +} + static void print_status_message( struct dc_link *link, const struct link_training_settings *lt_settings, @@ -1210,6 +1230,9 @@ enum link_training_result dc_link_dp_perform_link_training( dp_set_fec_ready(link, fec_enable); #endif + /* Configure lttpr mode */ + if (!link->is_lttpr_mode_transparent) + configure_lttpr_mode(link); /* 2. perform link training (set link training done * to false is done as well) @@ -1426,6 +1449,17 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) max_link_cap.link_spread) max_link_cap.link_spread = link->reported_link_cap.link_spread; + /* + * account for lttpr repeaters cap + * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). 
+ */ + if (!link->is_lttpr_mode_transparent) { + if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + + if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + } return max_link_cap; } @@ -1571,6 +1605,13 @@ bool dp_verify_link_cap( max_link_cap = get_max_link_cap(link); + /* Grant extended timeout request */ + if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { + uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + + core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); + } + /* TODO implement override and monitor patch later */ /* try to train the link from high to low to @@ -2759,6 +2800,7 @@ static bool retrieve_link_cap(struct dc_link *link) /* Set default timeout to 3.2ms and read LTTPR capabilities */ bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support && !link->dc->config.disable_extended_timeout_support; + link->is_lttpr_mode_transparent = true; if (ext_timeout_support) { -- cgit From 64c12b733fe7eaffa9207e6f30b313595c6e6597 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Wed, 24 Jul 2019 11:01:44 -0400 Subject: drm/amd/display: implement lttpr logic 1-If at least one repeater is present in the link and we are in non transparent mode, perform clock recovery then channel equalization with all repeaters one by one before training DPRX. 2-Mark the end of LT with a repeater by setting training pattern 0 at the end of channel equalization with each repeater. Signed-off-by: abdoulaye berthe Reviewed-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 319 ++++++++++++++++----- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 39 ++- drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 6 +- 3 files changed, 292 insertions(+), 72 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 94d5a0ac308f..11b6e14b345e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -22,7 +22,7 @@ link->ctx->logger -#define DP_REPEATER_CONFIGURATION_AND_STATUS_OFFSET 0x50 +#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 /* maximum pre emphasis level allowed for each voltage swing level*/ static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { @@ -224,19 +224,31 @@ static enum dpcd_training_patterns return dpcd_tr_pattern; } +static inline bool is_repeater(struct dc_link *link, uint32_t offset) +{ + return (!link->is_lttpr_mode_transparent && offset != 0); +} + static void dpcd_set_lt_pattern_and_lane_settings( struct dc_link *link, const struct link_training_settings *lt_settings, - enum dc_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern, + uint32_t offset) { union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } }; - const uint32_t dpcd_base_lt_offset = - DP_TRAINING_PATTERN_SET; + + uint32_t dpcd_base_lt_offset; + uint8_t dpcd_lt_buffer[5] = {0}; union dpcd_training_pattern dpcd_pattern = { {0} }; uint32_t lane; uint32_t size_in_bytes; bool edp_workaround = false; /* TODO link_prop.INTERNAL */ + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + + if (is_repeater(link, offset)) + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + 
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); /***************************************************************** * DpcdAddress_TrainingPatternSet @@ -244,12 +256,12 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); - dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset] + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; - DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", __func__, - DP_TRAINING_PATTERN_SET, + dpcd_base_lt_offset, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); /***************************************************************** @@ -271,19 +283,19 @@ static void dpcd_set_lt_pattern_and_lane_settings( PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); } - /* concatinate everything into one buffer*/ + /* concatenate everything into one buffer*/ size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]); // 0x00103 - 0x00102 memmove( - &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset], + &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], dpcd_lane, size_in_bytes); - DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, - DP_TRAINING_LANE0_SET, + dpcd_base_lt_offset, dpcd_lane[0].bits.VOLTAGE_SWING_SET, dpcd_lane[0].bits.PRE_EMPHASIS_SET, dpcd_lane[0].bits.MAX_SWING_REACHED, @@ -498,8 +510,12 @@ static void get_lane_status_and_drive_settings( const struct link_training_settings *link_training_setting, union lane_status *ln_status, union lane_align_status_updated *ln_status_updated, - struct link_training_settings *req_settings) + struct link_training_settings *req_settings, + uint32_t offset) { + unsigned int lane01_status_address = DP_LANE0_1_STATUS; + uint8_t lane_adjust_offset = 4; + unsigned int lane01_adjust_address; uint8_t dpcd_buf[6] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; struct link_training_settings request_settings = { {0} }; @@ -507,9 +523,16 @@ static void get_lane_status_and_drive_settings( memset(req_settings, '\0', sizeof(struct link_training_settings)); + if (is_repeater(link, offset)) { + lane01_status_address = + DP_LANE0_1_STATUS_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + lane_adjust_offset = 3; + } + core_link_read_dpcd( link, - DP_LANE0_1_STATUS, + lane01_status_address, (uint8_t *)(dpcd_buf), sizeof(dpcd_buf)); @@ -520,22 +543,28 @@ static void get_lane_status_and_drive_settings( ln_status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); dpcd_lane_adjust[lane].raw = - get_nibble_at_index(&dpcd_buf[4], lane); + get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); } ln_status_updated->raw = dpcd_buf[2]; - DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ", + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", __func__, - DP_LANE0_1_STATUS, dpcd_buf[0], - DP_LANE2_3_STATUS, dpcd_buf[1]); + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + + if (is_repeater(link, offset)) + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - DC_LOG_HW_LINK_TRAINING("%s:\n %x 
Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n", + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", __func__, - DP_ADJUST_REQUEST_LANE0_1, - dpcd_buf[4], - DP_ADJUST_REQUEST_LANE2_3, - dpcd_buf[5]); + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); /*copy to req_settings*/ request_settings.link_settings.lane_count = @@ -574,10 +603,18 @@ static void get_lane_status_and_drive_settings( static void dpcd_set_lane_settings( struct dc_link *link, - const struct link_training_settings *link_training_setting) + const struct link_training_settings *link_training_setting, + uint32_t offset) { union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}}; uint32_t lane; + unsigned int lane0_set_address; + + lane0_set_address = DP_TRAINING_LANE0_SET; + + if (is_repeater(link, offset)) + lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); for (lane = 0; lane < (uint32_t)(link_training_setting-> @@ -600,7 +637,7 @@ static void dpcd_set_lane_settings( } core_link_write_dpcd(link, - DP_TRAINING_LANE0_SET, + lane0_set_address, (uint8_t *)(dpcd_lane), link_training_setting->link_settings.lane_count); @@ -623,9 +660,9 @@ static void dpcd_set_lane_settings( } */ - DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, - DP_TRAINING_LANE0_SET, + lane0_set_address, dpcd_lane[0].bits.VOLTAGE_SWING_SET, dpcd_lane[0].bits.PRE_EMPHASIS_SET, dpcd_lane[0].bits.MAX_SWING_REACHED, @@ -650,17 +687,6 @@ static bool is_max_vs_reached( } -void dc_link_dp_set_drive_settings( - struct dc_link *link, - struct link_training_settings *lt_settings) -{ - /* program ASIC PHY settings*/ - dp_set_hw_lane_settings(link, lt_settings); - - /* Notify DP sink the PHY settings from source */ - dpcd_set_lane_settings(link, lt_settings); -} - static bool perform_post_lt_adj_req_sequence( struct dc_link *link, struct link_training_settings *lt_settings) @@ -693,7 +719,8 @@ static bool perform_post_lt_adj_req_sequence( lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings); + &req_settings, + DPRX); if (dpcd_lane_status_updated.bits. 
POST_LT_ADJ_REQ_IN_PROGRESS == 0) @@ -750,6 +777,31 @@ static bool perform_post_lt_adj_req_sequence( } +/* Only used for channel equalization */ +static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) +{ + unsigned int aux_rd_interval_us = 400; + + switch (dpcd_aux_read_interval) { + case 0x01: + aux_rd_interval_us = 400; + break; + case 0x02: + aux_rd_interval_us = 4000; + break; + case 0x03: + aux_rd_interval_us = 8000; + break; + case 0x04: + aux_rd_interval_us = 16000; + break; + default: + break; + } + + return aux_rd_interval_us; +} + static enum link_training_result get_cr_failure(enum dc_lane_count ln_count, union lane_status *dpcd_lane_status) { @@ -768,37 +820,55 @@ static enum link_training_result get_cr_failure(enum dc_lane_count ln_count, static enum link_training_result perform_channel_equalization_sequence( struct dc_link *link, - struct link_training_settings *lt_settings) + struct link_training_settings *lt_settings, + uint32_t offset) { struct link_training_settings req_settings; enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; + uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = { {0} }; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; - dp_set_hw_training_pattern(link, tr_pattern); + if (is_repeater(link, offset)) + tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + + dp_set_hw_training_pattern(link, tr_pattern, offset); for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; retries_ch_eq++) { - dp_set_hw_lane_settings(link, lt_settings); + dp_set_hw_lane_settings(link, lt_settings, offset); /* 2. update DPCD*/ if (!retries_ch_eq) /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration*/ + * but only for the 1-st iteration + */ + dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - tr_pattern); + tr_pattern, offset); else - dpcd_set_lane_settings(link, lt_settings); + dpcd_set_lane_settings(link, lt_settings, offset); /* 3. wait for receiver to lock-on*/ - wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time); + wait_time_microsec = lt_settings->eq_pattern_time; + + if (!link->is_lttpr_mode_transparent) + wait_time_microsec = + translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[offset]); + + wait_for_training_aux_rd_interval( + link, + wait_time_microsec); /* 4. Read lane status and requested * drive settings as set by the sink*/ @@ -808,7 +878,8 @@ static enum link_training_result perform_channel_equalization_sequence( lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings); + &req_settings, + offset); /* 5. 
check CR done*/ if (!is_cr_done(lane_count, dpcd_lane_status)) @@ -827,13 +898,16 @@ static enum link_training_result perform_channel_equalization_sequence( return LINK_TRAINING_EQ_FAIL_EQ; } +#define TRAINING_AUX_RD_INTERVAL 100 //us static enum link_training_result perform_clock_recovery_sequence( struct dc_link *link, - struct link_training_settings *lt_settings) + struct link_training_settings *lt_settings, + uint32_t offset) { uint32_t retries_cr; uint32_t retry_count; + uint32_t wait_time_microsec; struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1; @@ -843,7 +917,7 @@ static enum link_training_result perform_clock_recovery_sequence( retries_cr = 0; retry_count = 0; - dp_set_hw_training_pattern(link, tr_pattern); + dp_set_hw_training_pattern(link, tr_pattern, offset); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS @@ -860,7 +934,8 @@ static enum link_training_result perform_clock_recovery_sequence( /* 1. call HWSS to set lane settings*/ dp_set_hw_lane_settings( link, - lt_settings); + lt_settings, + offset); /* 2. update DPCD of the receiver*/ if (!retries_cr) @@ -869,16 +944,23 @@ static enum link_training_result perform_clock_recovery_sequence( dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - tr_pattern); + tr_pattern, + offset); else dpcd_set_lane_settings( link, - lt_settings); + lt_settings, + offset); /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + if (!link->is_lttpr_mode_transparent) + wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + wait_for_training_aux_rd_interval( link, - lt_settings->cr_pattern_time); + wait_time_microsec); /* 4. Read lane status and requested drive * settings as set by the sink @@ -888,7 +970,8 @@ static enum link_training_result perform_clock_recovery_sequence( lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings); + &req_settings, + offset); /* 5. 
check CR done*/ if (is_cr_done(lane_count, dpcd_lane_status)) @@ -1057,10 +1140,38 @@ static void initialize_training_settings( lt_settings->enhanced_framing = 1; } +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + static void configure_lttpr_mode(struct dc_link *link) { /* aux timeout is already set to extended */ /* RESET/SET lttpr mode to enable non transparent mode */ + uint8_t repeater_cnt; + uint32_t aux_interval_address; + uint8_t repeater_id; enum lttpr_mode repeater_mode = phy_repeater_mode_transparent; core_link_write_dpcd(link, @@ -1074,9 +1185,43 @@ static void configure_lttpr_mode(struct dc_link *link) DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); + + repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( + link, + aux_interval_address, + (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], + sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); + link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + } } } +static void repeater_training_done(struct dc_link *link, uint32_t offset) +{ + union dpcd_training_pattern dpcd_pattern = { {0} }; + + const uint32_t dpcd_base_lt_offset = + DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + /* Set training not in progress*/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + static void print_status_message( struct dc_link *link, const struct link_training_settings *lt_settings, @@ -1156,6 +1301,17 @@ static void print_status_message( lt_spread); } +void dc_link_dp_set_drive_settings( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + /* program ASIC PHY settings*/ + dp_set_hw_lane_settings(link, lt_settings, DPRX); + + /* Notify DP sink the PHY settings from source */ + dpcd_set_lane_settings(link, lt_settings, DPRX); +} + bool dc_link_dp_perform_link_training_skip_aux( struct dc_link *link, const struct dc_link_settings *link_setting) @@ -1172,10 +1328,10 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 1. Perform_clock_recovery_sequence. */ /* transmit training pattern for clock recovery */ - dp_set_hw_training_pattern(link, pattern_for_cr); + dp_set_hw_training_pattern(link, pattern_for_cr, DPRX); /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, <_settings); + dp_set_hw_lane_settings(link, <_settings, DPRX); /* wait receiver to lock-on*/ wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); @@ -1183,10 +1339,10 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 2. 
Perform_channel_equalization_sequence. */ /* transmit training pattern for channel equalization. */ - dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq); + dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX); /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, <_settings); + dp_set_hw_lane_settings(link, <_settings, DPRX); /* wait receiver to lock-on. */ wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); @@ -1208,9 +1364,12 @@ enum link_training_result dc_link_dp_perform_link_training( { enum link_training_result status = LINK_TRAINING_SUCCESS; struct link_training_settings lt_settings; + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool fec_enable; #endif + uint8_t repeater_cnt; + uint8_t repeater_id; initialize_training_settings( link, @@ -1230,17 +1389,40 @@ enum link_training_result dc_link_dp_perform_link_training( dp_set_fec_ready(link, fec_enable); #endif - /* Configure lttpr mode */ - if (!link->is_lttpr_mode_transparent) + if (!link->is_lttpr_mode_transparent) { + /* Configure lttpr mode */ configure_lttpr_mode(link); - /* 2. perform link training (set link training done - * to false is done as well) - */ - status = perform_clock_recovery_sequence(link, <_settings); + /* 2. perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_clock_recovery_sequence(link, <_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + status = perform_channel_equalization_sequence(link, + <_settings, + repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + repeater_training_done(link, repeater_id); + } + } + + if (status == LINK_TRAINING_SUCCESS) { + status = perform_clock_recovery_sequence(link, <_settings, DPRX); if (status == LINK_TRAINING_SUCCESS) { status = perform_channel_equalization_sequence(link, - <_settings); + <_settings, + DPRX); + } } if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) { @@ -1393,10 +1575,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt( /* 2. perform link training (set link training done * to false is done as well) */ - lt_status = perform_clock_recovery_sequence(link, <_settings); + lt_status = perform_clock_recovery_sequence(link, <_settings, DPRX); if (lt_status == LINK_TRAINING_SUCCESS) { lt_status = perform_channel_equalization_sequence(link, - <_settings); + <_settings, + DPRX); } /* 3. 
Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/ @@ -3355,8 +3538,8 @@ bool dc_link_dp_set_test_pattern( if (is_dp_phy_pattern(test_pattern)) { /* Set DPCD Lane Settings before running test pattern */ if (p_link_settings != NULL) { - dp_set_hw_lane_settings(link, p_link_settings); - dpcd_set_lane_settings(link, p_link_settings); + dp_set_hw_lane_settings(link, p_link_settings, DPRX); + dpcd_set_lane_settings(link, p_link_settings, DPRX); } /* Blank stream if running test pattern */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index a519dbc5ecb6..5efbdc1eb173 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -19,6 +19,36 @@ #include "resource.h" #endif +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ + return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); +} + enum dc_status core_link_read_dpcd( struct dc_link *link, uint32_t address, @@ -212,7 +242,8 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal) bool dp_set_hw_training_pattern( struct dc_link *link, - enum dc_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern, + uint32_t offset) { enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; @@ -240,10 +271,14 @@ bool dp_set_hw_training_pattern( void dp_set_hw_lane_settings( struct dc_link *link, - const struct link_training_settings *link_settings) + const struct link_training_settings *link_settings, + uint32_t offset) { struct link_encoder *encoder = link->link_enc; + if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset)) + return; + /* call Encoder to set lane settings */ encoder->funcs->dp_set_lane_settings(encoder, link_settings); } diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 4eff5d38a2f9..9af7ee5bc8ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -60,11 +60,13 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal); bool dp_set_hw_training_pattern( struct dc_link *link, - enum dc_dp_training_pattern pattern); + enum dc_dp_training_pattern pattern, + uint32_t offset); void dp_set_hw_lane_settings( struct dc_link *link, - const struct link_training_settings *link_settings); + const struct link_training_settings *link_settings, + uint32_t offset); void dp_set_hw_test_pattern( struct dc_link *link, -- cgit From 9bffd0806d80de9c189b8cf69b3022783e5c8f2a Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Thu, 19 Sep 2019 15:51:00 -0400 Subject: drm/amd/display: use previous aux timeout val if no repeater. [Why] The aux timeout value is not default before reading link cap. Setting it to default when lttpr is not enabled causes some monitor not to light up. 
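In code terms, the fix laid out in the [How] notes and diff below is a save-and-restore of the AUX timeout. A rough sketch, assuming dc_link_aux_configure_timeout() is changed to return the previously programmed value (as this patch makes it do) and using lttpr_present as a hypothetical stand-in for the repeater-count check:

        /* Illustrative flow only:
         *  1. remember the timeout the AUX engine was using,
         *  2. switch to the extended timeout for the LTTPR capability reads,
         *  3. restore the remembered value if no repeater is found.
         */
        uint32_t prev_timeout_val = 0;
        bool lttpr_present = false;     /* hypothetical stand-in */

        if (ext_timeout_support)
                prev_timeout_val = dc_link_aux_configure_timeout(link->ddc,
                                LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD);

        /* LTTPR capability reads happen here and may set lttpr_present */

        if (ext_timeout_support && !lttpr_present)
                dc_link_aux_configure_timeout(link->ddc, prev_timeout_val);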
[How] Read the aux engine timeout value before setting it to extended. Set the aux engine timeout to its previous value if no lttpr. Signed-off-by: abdoulaye berthe Reviewed-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 13 +++---- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9 +++-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 46 ++++++++++++++++++----- drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2 +- 6 files changed, 52 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index a2e6adacaff6..c8037af93e0a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -650,17 +650,16 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, } -enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc, uint32_t timeout) { - enum dc_status status = DC_OK; + uint32_t prev_timeout = 0; struct ddc *ddc_pin = ddc->ddc_pin; - if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL) - return DC_ERROR_UNEXPECTED; - if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout)) - status = DC_ERROR_UNEXPECTED; - return status; + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) + prev_timeout = + ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout); + return prev_timeout; } /*test only function*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 11b6e14b345e..6e1f00ab6646 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2977,6 +2977,7 @@ static bool retrieve_link_cap(struct dc_link *link) union dp_downstream_port_present ds_port = { 0 }; enum dc_status status = DC_ERROR_UNEXPECTED; uint32_t read_dpcd_retry_cnt = 3; + uint32_t prev_timeout_val; int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; @@ -2987,7 +2988,9 @@ static bool retrieve_link_cap(struct dc_link *link) link->is_lttpr_mode_transparent = true; if (ext_timeout_support) { - status = dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); + prev_timeout_val = + dc_link_aux_configure_timeout(link->ddc, + LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); } memset(dpcd_data, '\0', sizeof(dpcd_data)); @@ -3022,7 +3025,7 @@ static bool retrieve_link_cap(struct dc_link *link) return false; } - if (ext_timeout_support && link->dpcd_caps.dpcd_rev.raw >= 0x14) { + if (ext_timeout_support) { status = core_link_read_dpcd( link, DP_PHY_REPEATER_CNT, @@ -3063,7 +3066,7 @@ static bool retrieve_link_cap(struct dc_link *link) &link->dpcd_caps.lttpr_caps.max_ext_timeout, sizeof(link->dpcd_caps.lttpr_caps.max_ext_timeout)); } else { - dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + dc_link_aux_configure_timeout(link->ddc, prev_timeout_val); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index e472608faf33..f7626cd70ec8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -60,12 +60,14 @@ enum { AUX_DEFER_RETRY_COUNTER = 6 }; -#define TIME_OUT_INCREMENT 1016 -#define TIME_OUT_MULTIPLIER_8 8 -#define TIME_OUT_MULTIPLIER_16 16 -#define TIME_OUT_MULTIPLIER_32 32 -#define TIME_OUT_MULTIPLIER_64 64 -#define MAX_TIMEOUT_LENGTH 127 +#define TIME_OUT_INCREMENT 1016 +#define TIME_OUT_MULTIPLIER_8 8 +#define TIME_OUT_MULTIPLIER_16 16 +#define TIME_OUT_MULTIPLIER_32 32 +#define TIME_OUT_MULTIPLIER_64 64 +#define MAX_TIMEOUT_LENGTH 127 +#define DEFAULT_AUX_ENGINE_MULT 0 +#define DEFAULT_AUX_ENGINE_LENGTH 69 static void release_engine( struct dce_aux *engine) @@ -427,11 +429,14 @@ void dce110_engine_destroy(struct dce_aux **engine) } -static bool dce_aux_configure_timeout(struct ddc_service *ddc, +static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc, uint32_t timeout_in_us) { uint32_t multiplier = 0; uint32_t length = 0; + uint32_t prev_length = 0; + uint32_t prev_mult = 0; + uint32_t prev_timeout_val = 0; struct ddc *ddc_pin = ddc->ddc_pin; struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); @@ -440,7 +445,10 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc, aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER; /* 2-Update aux timeout period length and multiplier */ - if (timeout_in_us <= TIME_OUT_INCREMENT) { + if (timeout_in_us == 0) { + multiplier = DEFAULT_AUX_ENGINE_MULT; + length = DEFAULT_AUX_ENGINE_LENGTH; + } else if (timeout_in_us <= TIME_OUT_INCREMENT) { multiplier = 0; length = timeout_in_us/TIME_OUT_MULTIPLIER_8; if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) @@ -464,9 +472,29 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc, length = (length < MAX_TIMEOUT_LENGTH) ? 
length : MAX_TIMEOUT_LENGTH; + REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult); + + switch (prev_mult) { + case 0: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8; + break; + case 1: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16; + break; + case 2: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32; + break; + case 3: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64; + break; + default: + prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8; + break; + } + REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); - return true; + return prev_timeout_val; } static struct dce_aux_funcs aux_functions = { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index b4b2c79a8073..2e2e925a506b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -311,7 +311,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); struct dce_aux_funcs { - bool (*configure_timeout) + uint32_t (*configure_timeout) (struct ddc_service *ddc, uint32_t timeout); void (*destroy) diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 14716ba35662..de2d160114db 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -105,7 +105,7 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload); -enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc, uint32_t timeout); void dal_ddc_service_write_scdc_data( diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 045138dbdccb..a6500b98fe0d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,7 +28,7 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ -#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/ +#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/ #define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/ struct dc_link; -- cgit From 61aa7a6f760e78e50fad708029fc3aa201ec7a89 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Thu, 10 Oct 2019 16:41:52 -0400 Subject: drm/amd/display: disable lttpr for invalid lttpr caps. 1-Read lttpr caps in 5-bytes 2-Parse caps 3-Validate caps and set lttpr_mode 4-Use hw default timeout when lttpr is disabled. 
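Step 3 reduces to a small sanity predicate over the capability block read in step 1. A sketch of that predicate, using the dc_lttpr_caps fields from dc_types.h; the helper name lttpr_caps_valid() is made up for illustration, but the bounds mirror the hunk below:

        /* Sketch: non-transparent LTTPR mode is only enabled when the reported
         * caps look sane: at least one repeater, a lane count between 1 and 4,
         * and an LTTPR field data structure revision of 1.4 or newer.
         */
        static bool lttpr_caps_valid(const struct dc_lttpr_caps *caps)
        {
                return caps->phy_repeater_cnt > 0 &&
                       caps->max_lane_count > 0 &&
                       caps->max_lane_count <= 4 &&
                       caps->revision.raw >= 0x14;
        }

When the predicate fails, the link stays in transparent mode and the AUX timeout is dropped back to the hardware default, as per step 4.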
Signed-off-by: abdoulaye berthe Reviewed-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 90 ++++++++++++------------ drivers/gpu/drm/amd/display/dc/dc_types.h | 15 +--- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2 +- 3 files changed, 49 insertions(+), 58 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 6e1f00ab6646..7d18fc1e68c6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1172,7 +1172,7 @@ static void configure_lttpr_mode(struct dc_link *link) uint8_t repeater_cnt; uint32_t aux_interval_address; uint8_t repeater_id; - enum lttpr_mode repeater_mode = phy_repeater_mode_transparent; + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, @@ -1180,7 +1180,7 @@ static void configure_lttpr_mode(struct dc_link *link) sizeof(repeater_mode)); if (!link->is_lttpr_mode_transparent) { - repeater_mode = phy_repeater_mode_non_transparent; + repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, @@ -2964,7 +2964,11 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, static bool retrieve_link_cap(struct dc_link *link) { - uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1]; + /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, + * which means size 16 will be good for both of those DPCD register block reads + */ + uint8_t dpcd_data[16]; + uint8_t lttpr_dpcd_data[6]; /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. 
*/ @@ -2977,7 +2981,6 @@ static bool retrieve_link_cap(struct dc_link *link) union dp_downstream_port_present ds_port = { 0 }; enum dc_status status = DC_ERROR_UNEXPECTED; uint32_t read_dpcd_retry_cnt = 3; - uint32_t prev_timeout_val; int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; @@ -2988,12 +2991,12 @@ static bool retrieve_link_cap(struct dc_link *link) link->is_lttpr_mode_transparent = true; if (ext_timeout_support) { - prev_timeout_val = - dc_link_aux_configure_timeout(link->ddc, - LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); + dc_link_aux_configure_timeout(link->ddc, + LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); } memset(dpcd_data, '\0', sizeof(dpcd_data)); + memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); memset(&down_strm_port_count, '\0', sizeof(union down_stream_port_count)); memset(&edp_config_cap, '\0', @@ -3026,47 +3029,46 @@ static bool retrieve_link_cap(struct dc_link *link) } if (ext_timeout_support) { + status = core_link_read_dpcd( link, - DP_PHY_REPEATER_CNT, - &link->dpcd_caps.lttpr_caps.phy_repeater_cnt, - sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt)); - - if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0) { - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + lttpr_dpcd_data, + sizeof(lttpr_dpcd_data)); + + link->dpcd_caps.lttpr_caps.revision.raw = + lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_link_rate = + lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = + lttpr_dpcd_data[DP_PHY_REPEATER_CNT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_lane_count = + lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.mode = + lttpr_dpcd_data[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_ext_timeout = + lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 && + link->dpcd_caps.lttpr_caps.max_lane_count > 0 && + link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && + link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) { link->is_lttpr_mode_transparent = false; - - status = core_link_read_dpcd( - link, - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, - (uint8_t *)&link->dpcd_caps.lttpr_caps.revision, - sizeof(link->dpcd_caps.lttpr_caps.revision)); - - status = core_link_read_dpcd( - link, - DP_MAX_LINK_RATE_PHY_REPEATER, - &link->dpcd_caps.lttpr_caps.max_link_rate, - sizeof(link->dpcd_caps.lttpr_caps.max_link_rate)); - - status = core_link_read_dpcd( - link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&link->dpcd_caps.lttpr_caps.mode, - sizeof(link->dpcd_caps.lttpr_caps.mode)); - - status = core_link_read_dpcd( - link, - DP_MAX_LANE_COUNT_PHY_REPEATER, - &link->dpcd_caps.lttpr_caps.max_lane_count, - sizeof(link->dpcd_caps.lttpr_caps.max_lane_count)); - - status = core_link_read_dpcd( - link, - DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, - &link->dpcd_caps.lttpr_caps.max_ext_timeout, - sizeof(link->dpcd_caps.lttpr_caps.max_ext_timeout)); } else { - dc_link_aux_configure_timeout(link->ddc, prev_timeout_val); + /*No lttpr reset timeout to its default value*/ + link->is_lttpr_mode_transparent = true; + dc_link_aux_configure_timeout(link->ddc, 
LINK_AUX_DEFAULT_TIMEOUT_PERIOD); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 837859e65e45..45dfed8bcaf7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -420,20 +420,9 @@ enum link_training_offset { LTTPR_PHY_REPEATER8 = 8 }; -enum lttpr_mode { - phy_repeater_mode_transparent = 0x55, - phy_repeater_mode_non_transparent = 0xAA -}; - -enum lttpr_rev { - lttpr_rev_unknown = 0x0, - lttpr_rev_14 = 0x14, - lttpr_rev_max = 0x20 -}; - struct dc_lttpr_caps { - enum lttpr_rev revision; - enum lttpr_mode mode; + union dpcd_rev revision; + uint8_t mode; uint8_t max_lane_count; uint8_t max_link_rate; uint8_t phy_repeater_cnt; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index a6500b98fe0d..1e6ff6eb5bfc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -29,7 +29,7 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ #define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/ -#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ struct dc_link; struct dc_stream_state; -- cgit From 16f17eda8bad3c29db3265298009994ae136237b Mon Sep 17 00:00:00 2001 From: Leo Li Date: Mon, 4 Nov 2019 09:22:23 -0500 Subject: drm/amd/display: Send vblank and user events at vsartup for DCN [Why] For DCN hardware, the crtc_high_irq handler is assigned to the vstartup interrupt. This is different from DCE, which has it assigned to vblank start. We'd like to send vblank and user events at vstartup because: * It happens close enough to vupdate - the point of no return for HW. * It is programmed as lines relative to vblank end - i.e. it is not in the variable portion when VRR is enabled. We should signal user events here. * The pflip interrupt responsible for sending user events today only fires if the DCH HUBP component is not clock gated. In situations where planes are disabled - but the CRTC is enabled - user events won't be sent out, leading to flip done timeouts. Consequently, this makes vupdate on DCN hardware redundant. It will be removed in the next change. [How] Add a DCN-specific crtc_high_irq handler, and hook it to the VStartup signal. Inside the DCN handler, we send off user events if the pflip handler hasn't already done so. Reviewed-by: Nicholas Kazlauskas Signed-off-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 65 ++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f2c9a5e5c6cc..8dc3376908af 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -485,6 +485,69 @@ static void dm_crtc_high_irq(void *interrupt_params) } } + +/** + * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs + * @interrupt params - interrupt parameters + * + * Notify DRM's vblank event handler at VSTARTUP + * + * Unlike DCE hardware, we trigger the handler at VSTARTUP. 
at which: + * * We are close enough to VUPDATE - the point of no return for hw + * * We are in the fixed portion of variable front porch when vrr is enabled + * * We are before VUPDATE, where double-buffered vrr registers are swapped + * + * It is therefore the correct place to signal vblank, send user flip events, + * and update VRR. + */ +static void dm_dcn_crtc_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + struct dm_crtc_state *acrtc_state; + unsigned long flags; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); + + if (!acrtc) + return; + + acrtc_state = to_dm_crtc_state(acrtc->base.state); + + DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); + + amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + drm_crtc_handle_vblank(&acrtc->base); + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + + if (acrtc_state->vrr_params.supported && + acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + } + + if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) { + if (acrtc->event) { + drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); + acrtc->event = NULL; + drm_crtc_vblank_put(&acrtc->base); + } + acrtc->pflip_status = AMDGPU_FLIP_NONE; + } + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); +} + static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -2175,7 +2238,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_crtc_high_irq, c_irq_params); + dm_dcn_crtc_high_irq, c_irq_params); } /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to -- cgit From 3a2ce8d66a4b81cb6d3c6c61e3fb4d061fff0e38 Mon Sep 17 00:00:00 2001 From: Leo Li Date: Mon, 4 Nov 2019 14:08:05 -0500 Subject: drm/amd/display: Disable VUpdate interrupt for DCN hardware [Why] On DCN hardware, the crtc_high_irq handler makes vupdate_high_irq handler redundant. All the vupdate handler does is handle vblank events, and update vrr for DCE hw (excluding VEGA, more on that later). As far as usermode is concerned. vstartup happens close enough to vupdate on DCN that it can be considered the "same". Handling vblank and updating vrr at vstartup effectively replaces vupdate on DCN. Vega is a bit special. Like DCN, the VRR registers on Vega are double-buffered, and swapped at vupdate. But Unlike DCN, it lacks a vstartup interrupt. This means we can't quite remove the vupdate handler for it, since delayed user events due to vrr are sent off there. [How] Remove registration of vupdate interrupt handler for DCN. Disable vupdate interrupt if asic family DCN, enable otherwise. 
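At runtime the [How] above boils down to a single family test: everything newer than AMDGPU_FAMILY_AI (the Vega parts) is DCN-based and does its vblank/VRR work from the VSTARTUP handler. A sketch of that test, wrapped in a hypothetical helper for readability; the actual patch open-codes the comparison in dm_set_vupdate_irq():

        /* Sketch: DCE hardware (family <= AMDGPU_FAMILY_AI, i.e. Vega and older)
         * still needs the VUPDATE interrupt; DCN parts handle vblank events and
         * VRR updates at VSTARTUP, so VUPDATE programming is skipped there.
         */
        static inline bool dm_needs_vupdate_irq(struct amdgpu_device *adev)
        {
                return adev->family <= AMDGPU_FAMILY_AI;
        }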
Reviewed-by: Nicholas Kazlauskas Signed-off-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32 +++-------------------- 1 file changed, 4 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8dc3376908af..b5b7c3daf2e8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2241,34 +2241,6 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) dm_dcn_crtc_high_irq, c_irq_params); } - /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to - * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx - * to trigger at end of each vblank, regardless of state of the lock, - * matching DCE behaviour. - */ - for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; - i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; - i++) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); - - if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); - return r; - } - - int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; - int_params.irq_source = - dc_interrupt_to_irq_source(dc, i, 0); - - c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; - - c_irq_params->adev = adev; - c_irq_params->irq_src = int_params.irq_source; - - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_vupdate_high_irq, c_irq_params); - } - /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; @@ -4178,6 +4150,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) struct amdgpu_device *adev = crtc->dev->dev_private; int rc; + /* Do not set vupdate for DCN hardware */ + if (adev->family > AMDGPU_FAMILY_AI) + return 0; + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; -- cgit From 1da37801a8b0fffb024fea594c7f1d7867ed8aa0 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 6 Nov 2019 14:38:55 -0500 Subject: drm/amd/display: Drop CONFIG_DRM_AMD_DC_DCN2_0 and DSC_SUPPORTED [Why] DCN2 and DSC are stable enough to be build by default. So drop the flags. [How] Remove them using the unifdef tool. The following commands were executed in sequence: $ find -name '*.c' -exec unifdef -m -DCONFIG_DRM_AMD_DC_DSC_SUPPORT -DCONFIG_DRM_AMD_DC_DCN2_0 -UCONFIG_TRIM_DRM_AMD_DC_DCN2_0 '{}' ';' $ find -name '*.h' -exec unifdef -m -DCONFIG_DRM_AMD_DC_DSC_SUPPORT -DCONFIG_DRM_AMD_DC_DCN2_0 -UCONFIG_TRIM_DRM_AMD_DC_DCN2_0 '{}' ';' In addition: * Remove from kconfig, and replace any dependencies with DCN1_0. * Remove from any makefiles. 
* Fix and cleanup NV defninitions in dal_asic_id.h * Expand DCN1 ifdef to include DCN2 code in the following files: * clk_mgr/clk_mgr.c: dc_clk_mgr_create() * core/dc_resources.c: dc_create_resource_pool() * dce/dce_dmcu.c: dcn20_*lock_phy() * dce/dce_dmcu.c: dcn20_funcs * dce/dce_dmcu.c: dcn20_dmcu_create() * gpio/hw_factory.c: dal_hw_factory_init() * gpio/hw_translate.c: dal_hw_translate_init() Signed-off-by: Leo Li Signed-off-by: Bhawanpreet Lakha Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 -- drivers/gpu/drm/amd/display/Kconfig | 13 +----- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 ------ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 - .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2 - .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 2 - drivers/gpu/drm/amd/display/dc/Makefile | 12 ++---- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 - .../amd/display/dc/bios/command_table_helper2.c | 2 - drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 4 -- drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 2 - drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 4 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 48 ---------------------- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 18 -------- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 24 ----------- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 6 --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 8 +--- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 6 --- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 6 --- drivers/gpu/drm/amd/display/dc/dc.h | 38 ----------------- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 4 -- drivers/gpu/drm/amd/display/dc/dc_dsc.h | 2 - drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 20 --------- drivers/gpu/drm/amd/display/dc/dc_link.h | 4 -- drivers/gpu/drm/amd/display/dc/dc_stream.h | 16 -------- drivers/gpu/drm/amd/display/dc/dc_types.h | 6 --- drivers/gpu/drm/amd/display/dc/dce/dce_abm.h | 4 -- drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 2 - .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 4 -- .../gpu/drm/amd/display/dc/dce/dce_clock_source.h | 6 --- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 14 ++----- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 2 - drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 4 -- drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 6 --- drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 8 ---- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 4 -- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 6 --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 4 -- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 2 - .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 2 - drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c | 2 - drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 -- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 2 - .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 16 -------- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c | 4 -- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h | 6 --- .../drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 10 ----- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 2 - drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 4 -- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 4 -- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 9 ---- .../amd/display/dc/dcn10/dcn10_stream_encoder.h | 8 ---- drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 2 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 2 - 
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 2 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 2 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 14 ------- .../drm/amd/display/dc/dcn20/dcn20_link_encoder.c | 6 --- .../drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 2 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 4 -- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 2 - .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 42 ------------------- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 2 - .../amd/display/dc/dcn20/dcn20_stream_encoder.c | 6 --- .../drm/amd/display/dc/dcn21/dcn21_link_encoder.c | 2 - .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 16 -------- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2 - drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 6 --- drivers/gpu/drm/amd/display/dc/dml/Makefile | 4 +- .../amd/display/dc/dml/dcn21/display_mode_vba_21.c | 2 - .../display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 2 - .../drm/amd/display/dc/dml/display_mode_enums.h | 2 - .../gpu/drm/amd/display/dc/dml/display_mode_lib.c | 6 --- .../gpu/drm/amd/display/dc/dml/display_mode_lib.h | 6 --- .../drm/amd/display/dc/dml/display_mode_structs.h | 2 - .../gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2 - .../gpu/drm/amd/display/dc/dml/display_mode_vba.h | 2 - drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 2 - drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h | 2 - drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h | 2 - drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c | 2 - drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h | 2 - drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 2 - drivers/gpu/drm/amd/display/dc/gpio/Makefile | 2 - .../amd/display/dc/gpio/dcn20/hw_factory_dcn20.c | 2 - .../amd/display/dc/gpio/dcn20/hw_factory_dcn20.h | 2 - .../amd/display/dc/gpio/dcn20/hw_translate_dcn20.c | 2 - .../amd/display/dc/gpio/dcn20/hw_translate_dcn20.h | 2 - .../amd/display/dc/gpio/dcn21/hw_factory_dcn21.c | 2 - .../amd/display/dc/gpio/dcn21/hw_translate_dcn21.c | 2 - drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h | 12 ------ drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c | 4 -- drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 6 +-- drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 6 +-- drivers/gpu/drm/amd/display/dc/inc/core_status.h | 2 - drivers/gpu/drm/amd/display/dc/inc/core_types.h | 22 ---------- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2 - .../drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 12 ------ drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 4 -- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 12 ------ drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 2 - drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 10 ----- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 4 -- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 10 ----- .../gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 8 ---- drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 10 ----- drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 4 -- .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 10 ----- .../drm/amd/display/dc/inc/hw/timing_generator.h | 8 ---- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 12 ------ drivers/gpu/drm/amd/display/dc/inc/resource.h | 4 -- drivers/gpu/drm/amd/display/dc/irq/Makefile | 2 - .../display/dc/virtual/virtual_stream_encoder.c | 8 ---- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 2 - drivers/gpu/drm/amd/display/include/dal_types.h | 2 - drivers/gpu/drm/amd/display/include/logger_types.h | 6 --- .../gpu/drm/amd/display/modules/inc/mod_shared.h | 2 - 117 files changed, 
14 insertions(+), 744 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8ff69a5c2327..1b865d7f904d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1527,7 +1527,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) } parse_soc_bounding_box: -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /* * soc bounding box info is not integrated in disocovery table, * we always need to parse it from gpu info firmware. @@ -1538,7 +1537,6 @@ parse_soc_bounding_box: le32_to_cpu(hdr->header.ucode_array_offset_bytes)); adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; } -#endif break; } default: @@ -2602,8 +2600,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VEGA20: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 313183b80032..0b4c71dc0447 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -15,21 +15,11 @@ config DRM_AMD_DC config DRM_AMD_DC_DCN1_0 def_bool n help - RV family support for display engine - -config DRM_AMD_DC_DCN2_0 - bool "DCN 2.0 family" - default y - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN1_0 - help - Choose this option if you want to have - Navi support for display engine + RV and NV family support for display engine config DRM_AMD_DC_DCN2_1 bool "DCN 2.1 family" depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN2_0 help Choose this option if you want to have Renoir support for display engine @@ -39,7 +29,6 @@ config DRM_AMD_DC_DSC_SUPPORT default y depends on DRM_AMD_DC && X86 depends on DRM_AMD_DC_DCN1_0 - depends on DRM_AMD_DC_DCN2_0 help Choose this option if you want to have Dynamic Stream Compression support diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b5b7c3daf2e8..6c986d446864 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -942,9 +942,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.power_down_display_on_boot = true; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 init_data.soc_bounding_box = adev->dm.soc_bounding_box; -#endif /* Display Core create. 
*/ adev->dm.dc = dc_create(&init_data); @@ -2755,11 +2753,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI12: case CHIP_NAVI10: case CHIP_NAVI14: -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: #endif @@ -2915,7 +2911,6 @@ static int dm_early_init(void *handle) adev->mode_info.num_dig = 4; break; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: case CHIP_NAVI12: adev->mode_info.num_crtc = 6; @@ -2927,7 +2922,6 @@ static int dm_early_init(void *handle) adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: adev->mode_info.num_crtc = 4; @@ -3227,11 +3221,9 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_NAVI14 || adev->asic_type == CHIP_NAVI12 || -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) adev->asic_type == CHIP_RENOIR || #endif @@ -3940,10 +3932,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; int mode_refresh; int preferred_refresh = 0; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dsc_dec_dpcd_caps dsc_caps; uint32_t link_bandwidth_kbps; -#endif struct dc_sink *sink = NULL; if (aconnector == NULL) { @@ -4018,7 +4008,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, fill_stream_properties_from_drm_display_mode(stream, &mode, &aconnector->base, con_state, old_stream); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT stream->timing.flags.DSC = 0; if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { @@ -4037,7 +4026,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, &stream->timing.dsc_cfg)) stream->timing.flags.DSC = 1; } -#endif update_stream_scaling_settings(&mode, dm_state, stream); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index ab5da59aadc1..a8fc90a927d6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -287,7 +287,6 @@ struct amdgpu_display_manager { const struct firmware *fw_dmcu; uint32_t dmcu_fw_version; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** * @soc_bounding_box: * @@ -295,7 +294,6 @@ struct amdgpu_display_manager { * available in FW */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; struct amdgpu_dm_connector { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 57a226c0bc4a..72e677796a48 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -511,7 +511,6 @@ bool dm_helpers_submit_i2c( return result; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, @@ -522,7 +521,6 @@ bool dm_helpers_dp_write_dsc_enable( return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1); } -#endif bool dm_helpers_is_dp_sink_present(struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 55a520a63712..118488e473c1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -995,7 +995,6 @@ void dm_pp_get_funcs( funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq; break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case DCN_VERSION_2_0: funcs->ctx.ver = PP_SMU_VER_NV; funcs->nv_funcs.pp_smu.dm = ctx; @@ -1018,7 +1017,6 @@ void dm_pp_get_funcs( funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states; funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; -#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_1 case DCN_VERSION_2_1: diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 90482b158283..38ef29719400 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -25,18 +25,12 @@ DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN1_0 DC_LIBS += dcn20 -endif - - -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_LIBS += dsc -endif - -ifdef CONFIG_DRM_AMD_DC_DCN1_0 DC_LIBS += dcn10 dml endif + ifdef CONFIG_DRM_AMD_DC_DCN2_1 DC_LIBS += dcn21 endif @@ -59,7 +53,7 @@ include $(AMD_DC) DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN1_0 DISPLAY_CORE += dc_vm_helper.o endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 05be2c0c25d1..6e29ba8e582e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1417,10 +1417,8 @@ static enum bp_result get_integrated_info_v11( info->ma_channel_number = info_v11->umachannelnumber; info->lvds_ss_percentage = le16_to_cpu(info_v11->lvds_ss_percentage); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 info->dp_ss_control = le16_to_cpu(info_v11->reserved1); -#endif info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v11->lvds_ss_rate_10hz); info->hdmi_ss_percentage = diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index db153ddf0fee..45bb2bd81ba1 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -62,11 +62,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2( return true; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 9b2cb57bf2ba..a4ddd657598f 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -53,13 +53,9 @@ * remain as-is as it provides us with a guarantee from HW that it is correct. */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /* Defaults from spreadsheet rev#247. 
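Several hunks in this area (dm_pp_get_funcs, dal_bios_parser_init_cmd_tbl_helper2 above, and dc_create_resource_pool later in the patch) share one shape: switch on the DCE/DCN version enum, hand back the matching helper or ops table, and fall through to a failure path for unknown ASICs. A compact sketch of that dispatch, with invented version and ops names; note how two versions can share one table, just as DCN 2.0 and 2.1 both reuse dal_cmd_tbl_helper_dce112_get_table2() above:

#include <stdbool.h>
#include <stddef.h>

enum dcn_version { DCN_VERSION_2_0, DCN_VERSION_2_1, DCN_VERSION_UNKNOWN };

struct cmd_tbl_ops { int (*program)(int arg); };

static int program_dce112(int arg) { return arg; }

/* One helper table shared by more than one hardware generation. */
static const struct cmd_tbl_ops dce112_ops = { .program = program_dce112 };

bool get_cmd_tbl_ops(enum dcn_version ver, const struct cmd_tbl_ops **out)
{
	switch (ver) {
	case DCN_VERSION_2_0:
	case DCN_VERSION_2_1:
		*out = &dce112_ops;
		return true;
	default:
		*out = NULL;	/* unknown ASIC: let the caller bail out */
		return false;
	}
}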
* RV2 delta: dram_clock_change_latency, max_num_dpp */ -#else -/* Defaults from spreadsheet rev#247 */ -#endif const struct dcn_soc_bounding_box dcn10_soc_defaults = { /* latencies */ .sr_exit_time = 17, /*us*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index b864869cc7e3..9f15817a3eed 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -72,9 +72,7 @@ CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_0 ############################################################################### # DCN20 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 8828dd9c3783..76b4831a826e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -152,13 +152,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p break; } break; -#endif /* Family RV */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case FAMILY_NV: dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; -#endif /* Family NV */ +#endif /* Family RV and NV*/ default: ASSERT(0); /* Unknown Asic */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0cef26941b0d..e7e552f02b51 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -60,13 +60,9 @@ #include "dc_link_dp.h" #include "dc_dmub_srv.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "vm_helper.h" -#endif #include "dce/dce_i2c.h" @@ -578,11 +574,9 @@ static void destruct(struct dc *dc) dc->dcn_ip = NULL; #endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 kfree(dc->vm_helper); dc->vm_helper = NULL; -#endif } static bool construct(struct dc *dc, @@ -599,7 +593,6 @@ static bool construct(struct dc *dc, enum dce_version dc_version = DCE_VERSION_UNKNOWN; dc->config = init_params->flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 // Allocate memory for the vm_helper dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); if (!dc->vm_helper) { @@ -607,7 +600,6 @@ static bool construct(struct dc *dc, goto fail; } -#endif memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); @@ -641,9 +633,7 @@ static bool construct(struct dc *dc, } dc->dcn_ip = dcn_ip; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 dc->soc_bounding_box = init_params->soc_bounding_box; -#endif #endif dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); @@ -745,7 +735,6 @@ fail: return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static bool disable_all_writeback_pipes_for_stream( const struct dc *dc, struct dc_stream_state *stream, @@ -758,7 +747,6 @@ static bool disable_all_writeback_pipes_for_stream( return true; } -#endif static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { @@ -784,16 +772,12 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) } if (should_disable && old_stream) { dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) disable_all_writeback_pipes_for_stream(dc, old_stream, 
dangling_context); -#endif if (dc->hwss.apply_ctx_for_surface) dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, dangling_context); -#endif } current_ctx = dc->current_state; @@ -1183,10 +1167,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_status[i].plane_count, context); /* use new pipe config in new context */ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, context); -#endif /* Program hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1205,10 +1187,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program all planes within new context*/ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, context); -#endif for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; @@ -1692,10 +1672,8 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->gamut_remap) su_flags->bits.gamut_remap = 1; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (stream_update->wb_update) su_flags->bits.wb_update = 1; -#endif if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -1860,7 +1838,6 @@ static void copy_surface_update_to_plane( sizeof(struct dc_transfer_func_distributed_points)); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (srf_update->func_shaper && (surface->in_shaper_func != srf_update->func_shaper)) @@ -1879,7 +1856,6 @@ static void copy_surface_update_to_plane( memcpy(surface->blend_tf, srf_update->blend_tf, sizeof(*surface->blend_tf)); -#endif if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = *srf_update->input_csc_color_matrix; @@ -1954,7 +1930,6 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->dither_option) stream->dither_option = *update->dither_option; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* update current stream with writeback info */ if (update->wb_update) { int i; @@ -1965,8 +1940,6 @@ static void copy_stream_update_to_stream(struct dc *dc, stream->writeback_info[i] = update->wb_update->writeback_info[i]; } -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) if (update->dsc_config) { struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; uint32_t old_dsc_enabled = stream->timing.flags.DSC; @@ -1981,7 +1954,6 @@ static void copy_stream_update_to_stream(struct dc *dc, stream->timing.flags.DSC = old_dsc_enabled; } } -#endif } static void commit_planes_do_stream_update(struct dc *dc, @@ -2022,31 +1994,25 @@ static void commit_planes_do_stream_update(struct dc *dc, dc_stream_program_csc_matrix(dc, stream); if (stream_update->dither_option) { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -#endif resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) while (odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, &stream->bit_depth_params, &stream->clamping); odm_pipe = odm_pipe->next_odm_pipe; } -#endif } -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) { 
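The copy_stream_update_to_stream() hunk above snapshots the current DSC configuration and enable flag before applying update->dsc_config, then restores both if the new configuration fails validation (the validation call itself sits in the elided middle of the hunk). A minimal sketch of that save-apply-validate-rollback pattern, with a hypothetical validate() callback standing in for the real bandwidth check:

#include <stdbool.h>
#include <stdint.h>

struct dsc_cfg {
	uint32_t num_slices_h;
	uint32_t num_slices_v;
	uint32_t bits_per_pixel;
};

struct stream_timing {
	struct dsc_cfg dsc_cfg;
	uint32_t dsc_enabled;
};

/* Apply a new DSC config; if it does not validate, put the old one back so
 * the stream is never left in a state the hardware cannot drive. */
bool apply_dsc_config(struct stream_timing *timing,
		      const struct dsc_cfg *new_cfg, bool enable,
		      bool (*validate)(const struct stream_timing *))
{
	struct dsc_cfg old_cfg = timing->dsc_cfg;	/* snapshot */
	uint32_t old_enabled = timing->dsc_enabled;

	timing->dsc_cfg = *new_cfg;
	timing->dsc_enabled = enable;

	if (!validate(timing)) {
		timing->dsc_cfg = old_cfg;		/* roll back */
		timing->dsc_enabled = old_enabled;
		return false;
	}
	return true;
}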
dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true); dp_update_dsc_config(pipe_ctx); dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false); } -#endif /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -2133,15 +2099,12 @@ static void commit_planes_for_stream(struct dc *dc, */ if (dc->hwss.apply_ctx_for_surface) dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, context); -#endif return; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (!IS_DIAG_DC(dc->ctx->dce_environment)) { for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2163,7 +2126,6 @@ static void commit_planes_for_stream(struct dc *dc, } } } -#endif // Update Type FULL, Surface updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -2184,7 +2146,6 @@ static void commit_planes_for_stream(struct dc *dc, if (update_type == UPDATE_TYPE_FAST) continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); if (dc->hwss.program_triplebuffer != NULL && @@ -2193,7 +2154,6 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } -#endif stream_status = stream_get_status(context, pipe_ctx->stream); @@ -2202,10 +2162,8 @@ static void commit_planes_for_stream(struct dc *dc, dc, pipe_ctx->stream, stream_status->plane_count, context); } } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) dc->hwss.program_front_end_for_ctx(dc, context); -#endif // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { @@ -2215,7 +2173,6 @@ static void commit_planes_for_stream(struct dc *dc, */ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.set_flip_control_gsl) for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2234,7 +2191,6 @@ static void commit_planes_for_stream(struct dc *dc, plane_state->flip_immediate); } } -#endif /* Perform requested Updates */ for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2247,7 +2203,6 @@ static void commit_planes_for_stream(struct dc *dc, if (pipe_ctx->plane_state != plane_state) continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /*program triple buffer after lock based on flip type*/ if (dc->hwss.program_triplebuffer != NULL && !dc->debug.disable_tri_buf) { @@ -2255,7 +2210,6 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.program_triplebuffer( dc, pipe_ctx, plane_state->triplebuffer_flips); } -#endif if (srf_updates[i].flip_addr) dc->hwss.update_plane_addr(dc, pipe_ctx); } @@ -2421,12 +2375,10 @@ void dc_set_power_state( dc->hwss.init_hw(dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 if (dc->hwss.init_sys_ctx != NULL && dc->vm_pa_config.valid) { dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } -#endif break; default: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index b58916212187..0a0badb2e206 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1496,9 +1496,7 @@ static enum dc_status enable_link_dp( struct dc_link *link = stream->link; struct dc_link_settings link_settings = {0}; enum dp_panel_mode panel_mode; -#ifdef 
CONFIG_DRM_AMD_DC_DSC_SUPPORT bool fec_enable; -#endif int i; bool apply_seamless_boot_optimization = false; @@ -1569,14 +1567,12 @@ static enum dc_status enable_link_dp( else status = DC_FAIL_DP_LINK_TRAINING; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; dp_set_fec_enable(link, fec_enable); -#endif return status; } @@ -2199,14 +2195,12 @@ static void disable_link(struct dc_link *link, enum signal_type signal) dp_disable_link_phy(link, signal); else dp_disable_link_phy_mst(link, signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (dc_is_dp_sst_signal(signal) || link->mst_stream_alloc_table.stream_count == 0) { dp_set_fec_enable(link, false); dp_set_fec_ready(link, false); } -#endif } else { if (signal != SIGNAL_TYPE_VIRTUAL) link->link_enc->funcs->disable_output(link->link_enc, signal); @@ -3028,23 +3022,19 @@ void core_link_enable_stream( CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, COLOR_DEPTH_UNDEFINED); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); } -#endif core_dc->hwss.enable_stream(pipe_ctx); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Set DPS PPS SDP (AKA "info frames") */ if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_pps_sdp(pipe_ctx, true); } -#endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); @@ -3058,14 +3048,12 @@ void core_link_enable_stream( update_psp_stream_config(pipe_ctx, false); #endif } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); } -#endif } void core_link_disable_stream(struct pipe_ctx *pipe_ctx) @@ -3114,12 +3102,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) core_dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } -#endif } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) @@ -3187,13 +3173,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( uint32_t bits_per_channel = 0; uint32_t kbps; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (timing->flags.DSC) { kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); return kbps; } -#endif switch (timing->display_color_depth) { case COLOR_DEPTH_666: @@ -3369,7 +3353,6 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps *= 8; /* 8 bits per byte*/ link_bw_kbps *= link_setting->lane_count; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { /* Account for FEC overhead. 
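The dc_bandwidth_in_kbps_from_timing() hunk above computes the DSC stream rate as pix_clk_100hz * bits_per_pixel / 160, rounded up: the pixel clock is kept in 100 Hz units and the DSC target rate in 1/16-bpp units (128 means 8 bpp), so the combined divisor 16 * 1000 / 100 = 160 yields kbps directly. The dc_link_bandwidth_kbps() hunk just below then derates the raw link rate by 970/1000 when the sink reports FEC_CAPABLE. A small worked example of both calculations; the 4K60 and link numbers are illustrative, not taken from the patch, and plain 64-bit math replaces the kernel's mul_u64_u32_shr() fixed-point multiply:

#include <stdio.h>
#include <stdint.h>

/* Ceiling-divide form of the kbps computation shown in the hunk above. */
static uint64_t dsc_stream_kbps(uint64_t pix_clk_100hz, uint64_t bpp_x16)
{
	uint64_t kbps = pix_clk_100hz * bpp_x16;

	return kbps / 160 + ((kbps % 160) ? 1 : 0);
}

/* FEC derating from dc_link_bandwidth_kbps(): keep 970/1000 of the raw rate. */
static uint64_t fec_derate_kbps(uint64_t link_bw_kbps)
{
	return link_bw_kbps * 970 / 1000;
}

int main(void)
{
	/* 3840x2160@60 with a 594 MHz pixel clock and an 8 bpp DSC target
	 * (8 bpp == 128 in 1/16-bpp units): 5940000 * 128 / 160 = 4752000. */
	uint64_t stream = dsc_stream_kbps(5940000, 128);
	/* A made-up 10,000,000 kbps link, derated 3% for FEC overhead. */
	uint64_t link = fec_derate_kbps(10000000);

	printf("stream needs %llu kbps, FEC-derated link offers %llu kbps\n",
	       (unsigned long long)stream, (unsigned long long)link);
	return 0;
}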
* We have to do it based on caps, @@ -3394,7 +3377,6 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000, link_bw_kbps, 32); } -#endif return link_bw_kbps; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7d18fc1e68c6..65de32fbcc83 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -4,12 +4,8 @@ #include "dc_link_dp.h" #include "dm_helpers.h" #include "opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "resource.h" -#endif #include "inc/core_types.h" #include "link_hwss.h" @@ -1365,9 +1361,7 @@ enum link_training_result dc_link_dp_perform_link_training( enum link_training_result status = LINK_TRAINING_SUCCESS; struct link_training_settings lt_settings; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool fec_enable; -#endif uint8_t repeater_cnt; uint8_t repeater_id; @@ -1380,14 +1374,12 @@ enum link_training_result dc_link_dp_perform_link_training( /* 1. set link rate, lane count and spread. */ dpcd_set_link_settings(link, &lt_settings); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; dp_set_fec_ready(link, fec_enable); -#endif if (!link->is_lttpr_mode_transparent) { /* Configure lttpr mode */ @@ -1529,9 +1521,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt( enum link_training_result lt_status = LINK_TRAINING_SUCCESS; enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT; enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool fec_enable = false; -#endif initialize_training_settings( link, @@ -1551,11 +1541,9 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_enable_link_phy(link, link->connector_signal, dp_cs_id, link_settings); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Set FEC enable */ fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; dp_set_fec_ready(link, fec_enable); -#endif if (lt_overrides->alternate_scrambler_reset) { if (*lt_overrides->alternate_scrambler_reset) @@ -1596,9 +1584,7 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) */ if (link_down == true) { dp_disable_link_phy(link, link->connector_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT dp_set_fec_ready(link, false); -#endif } link->sync_lt_in_progress = false; @@ -3210,7 +3196,6 @@ static bool retrieve_link_cap(struct dc_link *link) dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); @@ -3232,7 +3217,6 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw)); } -#endif /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -3361,14 +3345,12 @@ static void set_crtc_test_pattern(struct dc_link *link, stream->timing.display_color_depth; struct bit_depth_reduction_params params; struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) int width = pipe_ctx->stream->timing.h_addressable + pipe_ctx->stream->timing.h_border_left + pipe_ctx->stream->timing.h_border_right; int height =
pipe_ctx->stream->timing.v_addressable + pipe_ctx->stream->timing.v_border_bottom + pipe_ctx->stream->timing.v_border_top; -#endif memset(&params, 0, sizeof(params)); @@ -3412,7 +3394,6 @@ static void set_crtc_test_pattern(struct dc_link *link, if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, controller_test_pattern, color_depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -3440,7 +3421,6 @@ static void set_crtc_test_pattern(struct dc_link *link, width, height); } -#endif } break; case DP_TEST_PATTERN_VIDEO_MODE: { @@ -3453,7 +3433,6 @@ static void set_crtc_test_pattern(struct dc_link *link, pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, color_depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -3480,7 +3459,6 @@ static void set_crtc_test_pattern(struct dc_link *link, width, height); } -#endif } break; @@ -3755,7 +3733,6 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) return DP_PANEL_MODE_DEFAULT; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void dp_set_fec_ready(struct dc_link *link, bool ready) { /* FEC has to be "set ready" before the link training. @@ -3818,5 +3795,4 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 5efbdc1eb173..bb1e8e5b5252 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -12,12 +12,8 @@ #include "dc_link_ddc.h" #include "dm_helpers.h" #include "dpcd_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "resource.h" -#endif static uint8_t convert_to_count(uint8_t lttpr_repeater_count) { @@ -374,7 +370,6 @@ void dp_retrain_link_dp_test(struct dc_link *link, } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define DC_LOGGER \ dsc->ctx->logger static void dsc_optc_config_log(struct display_stream_compressor *dsc, @@ -572,5 +567,4 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) dp_set_dsc_pps_sdp(pipe_ctx, true); return true; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 30f5434fa8e9..6e20c76b9933 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -49,9 +49,7 @@ #if defined(CONFIG_DRM_AMD_DC_DCN1_0) #include "dcn10/dcn10_resource.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/dcn20_resource.h" -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/dcn21_resource.h" #endif @@ -111,11 +109,9 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) break; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case FAMILY_NV: dc_version = DCN_VERSION_2_0; break; -#endif default: dc_version = DCE_VERSION_UNKNOWN; break; @@ -167,18 +163,16 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, case DCN_VERSION_1_01: res_pool = dcn10_create_resource_pool(init_data, dc); break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: res_pool = dcn20_create_resource_pool(init_data, dc); break; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) case
DCN_VERSION_2_1: res_pool = dcn21_create_resource_pool(init_data, dc); break; +#endif #endif default: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bb09243758fe..dc05c14530b0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -108,7 +108,6 @@ static void construct(struct dc_stream_state *stream, /* EDID CAP translation for HDMI 2.0 */ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg)); stream->timing.dsc_cfg.num_slices_h = 0; stream->timing.dsc_cfg.num_slices_v = 0; @@ -117,7 +116,6 @@ static void construct(struct dc_stream_state *stream, stream->timing.dsc_cfg.linebuf_depth = 9; stream->timing.dsc_cfg.version_minor = 2; stream->timing.dsc_cfg.ycbcr422_simple = 0; -#endif update_stream_signal(stream, dc_sink_data); @@ -367,7 +365,6 @@ bool dc_stream_set_cursor_position( return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info) @@ -480,7 +477,6 @@ bool dc_stream_remove_writeback(struct dc *dc, return true; } -#endif uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) { @@ -567,7 +563,6 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, return ret; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) { bool status = true; @@ -630,7 +625,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, return true; } -#endif void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index b9d6a5bd8522..aaecdee2b8b4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -50,7 +50,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->in_transfer_func->type = TF_TYPE_BYPASS; plane_state->in_transfer_func->ctx = ctx; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) plane_state->in_shaper_func = dc_create_transfer_func(); if (plane_state->in_shaper_func != NULL) { plane_state->in_shaper_func->type = TF_TYPE_BYPASS; @@ -67,7 +66,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->blend_tf->ctx = ctx; } -#endif } static void destruct(struct dc_plane_state *plane_state) @@ -80,7 +78,6 @@ static void destruct(struct dc_plane_state *plane_state) plane_state->in_transfer_func); plane_state->in_transfer_func = NULL; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (plane_state->in_shaper_func != NULL) { dc_transfer_func_release( plane_state->in_shaper_func); @@ -97,7 +94,6 @@ static void destruct(struct dc_plane_state *plane_state) plane_state->blend_tf = NULL; } -#endif } /******************************************************************************* @@ -262,7 +258,6 @@ alloc_fail: return NULL; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static void dc_3dlut_func_free(struct kref *kref) { struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount); @@ -296,6 +291,5 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut) { kref_get(&lut->refcount); } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h 
index 8ff7556eb2c4..102a55d8d026 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -113,17 +113,13 @@ struct dc_caps { bool psp_setup_panel_mode; bool extended_aux_timeout_support; bool dmcub_support; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hw_3d_lut; -#endif struct dc_plane_cap planes[MAX_PLANES]; }; struct dc_bug_wa { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool no_connect_phy_config; bool dedcn20_305_wa; -#endif bool skip_clock_update; }; @@ -364,10 +360,8 @@ struct dc_debug_options { bool disable_dfs_bypass; bool disable_dpp_power_gate; bool disable_hubp_power_gate; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_dsc_power_gate; int dsc_min_slice_height_override; -#endif bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; unsigned int min_disp_clk_khz; @@ -406,9 +400,7 @@ struct dc_debug_options { bool dmcub_emulation; bool dmub_command_table; /* for testing only */ struct dc_bw_validation_profile bw_val_profile; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_fec; -#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_1 bool disable_48mhz_pwrdwn; #endif @@ -417,9 +409,7 @@ struct dc_debug_options { */ unsigned int force_min_dcfclk_mhz; bool disable_timing_sync; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool cm_in_bypass; -#endif int force_clock_mode;/*every mode change.*/ bool nv12_iflip_vm_wa; @@ -432,7 +422,6 @@ struct dc_debug_data { uint32_t auxErrorCount; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config { struct { uint64_t start_addr; @@ -462,7 +451,6 @@ struct dc_virtual_addr_space_config { uint32_t page_table_block_size_in_bytes; uint8_t page_table_depth; // 1 = 1 level, 2 = 2 level, etc. 0 = invalid }; -#endif struct dc_bounding_box_overrides { int sr_exit_time_ns; @@ -490,9 +478,7 @@ struct dc { struct dc_bounding_box_overrides bb_overrides; struct dc_bug_wa work_arounds; struct dc_context *ctx; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config vm_pa_config; -#endif uint8_t link_count; struct dc_link *links[MAX_PIPES * 2]; @@ -530,10 +516,8 @@ struct dc { struct dc_debug_data debug_data; const char *build_id; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct vm_helper *vm_helper; const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; enum frame_buffer_mode { @@ -570,13 +554,11 @@ struct dc_init_data { struct dc_config flags; uint32_t log_mask; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; struct dc_callback_init { @@ -591,11 +573,9 @@ struct dc *dc_create(const struct dc_init_data *init_params); void dc_hardware_init(struct dc *dc); int dc_get_vmid_use_vector(struct dc *dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid); /* Returns the number of vmids supported */ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config); -#endif void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params); void dc_deinit_callbacks(struct dc *dc); @@ -671,7 +651,6 @@ struct dc_transfer_func { }; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) union dc_3dlut_state { struct { @@ -695,7 +674,6 @@ struct dc_3dlut { union dc_3dlut_state state; struct dc_context *ctx; }; -#endif /* * This structure is filled in by dc_surface_get_status and contains * the last requested address and the currently active address so the called @@ -746,9 +724,7 @@ 
union surface_update_flags { struct dc_plane_state { struct dc_plane_address address; struct dc_plane_flip_time time; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool triplebuffer_flips; -#endif struct scaling_taps scaling_quality; struct rect src_rect; struct rect dst_rect; @@ -771,11 +747,9 @@ struct dc_plane_state { enum dc_color_space color_space; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_3dlut *lut3d_func; struct dc_transfer_func *in_shaper_func; struct dc_transfer_func *blend_tf; -#endif enum surface_pixel_format format; enum dc_rotation_angle rotation; @@ -844,11 +818,9 @@ struct dc_surface_update { const struct dc_csc_transform *input_csc_color_matrix; const struct fixed31_32 *coeff_reduction_factor; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) const struct dc_transfer_func *func_shaper; const struct dc_3dlut *lut3d_func; const struct dc_transfer_func *blend_tf; -#endif }; /* @@ -869,11 +841,9 @@ void dc_transfer_func_retain(struct dc_transfer_func *dc_tf); void dc_transfer_func_release(struct dc_transfer_func *dc_tf); struct dc_transfer_func *dc_create_transfer_func(void); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_3dlut *dc_create_3dlut_func(void); void dc_3dlut_func_release(struct dc_3dlut *lut); void dc_3dlut_func_retain(struct dc_3dlut *lut); -#endif /* * This structure holds a surface address. There could be multiple addresses * in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such @@ -990,10 +960,8 @@ struct dpcd_caps { bool panel_mode_edp; bool dpcd_display_control_capable; bool ext_receiver_cap_field_present; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT union dpcd_fec_capability fec_cap; struct dpcd_dsc_capabilities dsc_caps; -#endif struct dc_lttpr_caps lttpr_caps; }; @@ -1016,14 +984,12 @@ struct dc_container_id { }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_sink_dsc_caps { // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology), // 'false' if they are sink's DSC caps bool is_virtual_dpcd_dsc; struct dsc_dec_dpcd_caps dsc_dec_caps; }; -#endif /* * The sink structure contains EDID and other display device properties @@ -1038,9 +1004,7 @@ struct dc_sink { struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_sink_dsc_caps sink_dsc_caps; -#endif /* private to DC core */ struct dc_link *link; @@ -1101,10 +1065,8 @@ bool dc_is_dmcu_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /******************************************************************************* * DSC Interfaces ******************************************************************************/ #include "dc_dsc.h" -#endif #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index ef79a686e4c2..4d3378d61938 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -129,9 +129,7 @@ struct dc_link_training_overrides { bool *alternate_scrambler_reset; bool *enhanced_framing; bool *mst_enable; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool *fec_enable; -#endif }; union dpcd_rev { @@ -536,7 +534,6 @@ union test_misc { unsigned char raw; }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* FEC capability DPCD register field 
bits-*/ union dpcd_fec_capability { struct { @@ -661,6 +658,5 @@ struct dpcd_dsc_capabilities { union dpcd_dsc_ext_capabilities dsc_ext_caps; }; -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 0ed2962add5a..a782ae18a1c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -1,4 +1,3 @@ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef DC_DSC_H_ #define DC_DSC_H_ /* @@ -69,4 +68,3 @@ bool dc_dsc_compute_config( const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index e0856bb8511f..86043d431d40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -167,12 +167,10 @@ enum surface_pixel_format { /*swaped & float*/ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F, /*grow graphics here if necessary */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX, SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX, SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT, SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT, -#endif SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr = SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, @@ -180,10 +178,8 @@ enum surface_pixel_format { SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr, SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb, SURFACE_PIXEL_FORMAT_SUBSAMPLE_END, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010, SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102, -#endif SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, SURFACE_PIXEL_FORMAT_INVALID @@ -222,12 +218,10 @@ enum tile_split_values { DC_ROTATED_MICRO_TILING = 0x3, }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum tripleBuffer_enable { DC_TRIPLEBUFFER_DISABLE = 0x0, DC_TRIPLEBUFFER_ENABLE = 0x1, }; -#endif /* TODO: These values come from hardware spec. We need to readdress this * if they ever change. @@ -427,13 +421,11 @@ struct dc_csc_transform { bool enable_adjustment; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_rgb_fixed { struct fixed31_32 red; struct fixed31_32 green; struct fixed31_32 blue; }; -#endif struct dc_gamma { struct kref refcount; @@ -468,10 +460,8 @@ enum dc_cursor_color_format { CURSOR_MODE_COLOR_1BIT_AND, CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA, CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED, CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED -#endif }; /* @@ -626,10 +616,8 @@ enum dc_color_depth { COLOR_DEPTH_121212, COLOR_DEPTH_141414, COLOR_DEPTH_161616, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 COLOR_DEPTH_999, COLOR_DEPTH_111111, -#endif COLOR_DEPTH_COUNT }; @@ -690,9 +678,7 @@ struct dc_crtc_timing_flags { * rates less than or equal to 340Mcsc */ uint32_t LTE_340MCSC_SCRAMBLE:1; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT uint32_t DSC : 1; /* Use DSC with this timing */ -#endif }; enum dc_timing_3d_format { @@ -717,7 +703,6 @@ enum dc_timing_3d_format { TIMING_3D_FORMAT_MAX, }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config { uint32_t num_slices_h; /* Number of DSC slices - horizontal */ uint32_t num_slices_v; /* Number of DSC slices - vertical */ @@ -728,7 +713,6 @@ struct dc_dsc_config { bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. 
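The DPCD capability declarations above (union dpcd_fec_capability and the DSC caps next to it) follow the raw/bits idiom used throughout these headers: a byte read from the sink overlays a bitfield struct so callers can test fields such as fec_cap.bits.FEC_CAPABLE directly. A toy version of the idiom; the bit positions are illustrative only, not the real DPCD layout, and bit-field ordering is compiler-defined:

#include <stdint.h>
#include <stdbool.h>

/* Toy capability byte: one defined bit plus reserved padding. */
union fec_capability {
	struct {
		uint8_t FEC_CAPABLE:1;
		uint8_t RESERVED:7;
	} bits;
	uint8_t raw;
};

/* Overlay a byte read from the sink and test a single capability bit. */
bool sink_supports_fec(uint8_t dpcd_byte)
{
	union fec_capability cap;

	cap.raw = dpcd_byte;
	return cap.bits.FEC_CAPABLE;
}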
*/ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ }; -#endif struct dc_crtc_timing { uint32_t h_total; uint32_t h_border_left; @@ -755,9 +739,7 @@ struct dc_crtc_timing { enum scanning_type scan_type; struct dc_crtc_timing_flags flags; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config dsc_cfg; -#endif }; #ifndef AMD_EDID_UTILITY @@ -796,7 +778,6 @@ enum vram_type { VIDEO_MEMORY_TYPE_GDDR6 = 6, }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dwb_cnv_out_bpc { DWB_CNV_OUT_BPC_8BPC = 0, DWB_CNV_OUT_BPC_10BPC = 1, @@ -847,7 +828,6 @@ struct mcif_buf_params { unsigned int swlock; }; -#endif #define MAX_TG_COLOR_VALUE 0x3FF struct tg_color { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 67ba6666a324..8971ce3a5480 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -29,13 +29,11 @@ #include "dc_types.h" #include "grph_object_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum dc_link_fec_state { dc_link_fec_not_ready, dc_link_fec_ready, dc_link_fec_enabled }; -#endif struct dc_link_status { bool link_active; struct dpcd_caps *dpcd_caps; @@ -141,9 +139,7 @@ struct dc_link { struct link_trace link_trace; struct gpio *hpd_gpio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum dc_link_fec_state fec_state; -#endif }; const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 70274fc43a72..3ea54321b045 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -52,7 +52,6 @@ struct freesync_context { bool dummy; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum hubp_dmdata_mode { DMDATA_SW_MODE, DMDATA_HW_MODE @@ -82,9 +81,7 @@ struct dc_dmdata_attributes { /* An unbounded array of uint32s, represents software dmdata to be loaded */ uint32_t *dmdata_sw_data; }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_writeback_info { bool wb_enabled; int dwb_pipe_inst; @@ -96,7 +93,6 @@ struct dc_writeback_update { unsigned int num_wb_info; struct dc_writeback_info writeback_info[MAX_DWB_PIPES]; }; -#endif enum vertical_interrupt_ref_point { START_V_UPDATE = 0, @@ -121,9 +117,7 @@ union stream_update_flags { uint32_t abm_level:1; uint32_t dpms_off:1; uint32_t gamut_remap:1; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t wb_update:1; -#endif } bits; uint32_t raw; @@ -204,11 +198,9 @@ struct dc_stream_state { struct crtc_trigger_info triggered_crtc_reset; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* writeback */ unsigned int num_wb_info; struct dc_writeback_info writeback_info[MAX_DWB_PIPES]; -#endif /* Computed state bits */ bool mode_changed : 1; @@ -227,9 +219,7 @@ struct dc_stream_state { bool apply_seamless_boot_optimization; uint32_t stream_id; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool is_dsc_enabled; -#endif union stream_update_flags update_flags; }; @@ -260,12 +250,8 @@ struct dc_stream_update { struct dc_csc_transform *output_csc_transform; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_writeback_update *wb_update; -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) struct dc_dsc_config *dsc_config; -#endif }; bool dc_is_stream_unchanged( @@ -355,7 +341,6 @@ bool dc_add_all_planes_for_stream( int plane_count, struct dc_state *context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info); @@ -366,7 +351,6 @@ 
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) bool dc_stream_set_dynamic_metadata(struct dc *dc, struct dc_stream_state *stream, struct dc_dmdata_attributes *dmdata_attr); -#endif enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 45dfed8bcaf7..1363e8907fbf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -468,7 +468,6 @@ enum display_content_type { DISPLAY_CONTENT_TYPE_GAME = 8 }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* writeback */ struct dwb_stereo_params { bool stereo_enabled; /* false: normal mode, true: 3D stereo */ @@ -499,7 +498,6 @@ struct dc_dwb_params { enum dwb_subsample_position subsample_position; struct dc_transfer_func *out_transfer_func; }; -#endif /* audio*/ @@ -607,9 +605,7 @@ enum dc_infoframe_type { DC_HDMI_INFOFRAME_TYPE_AVI = 0x82, DC_HDMI_INFOFRAME_TYPE_SPD = 0x83, DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_DP_INFOFRAME_TYPE_PPS = 0x10, -#endif }; struct dc_info_packet { @@ -788,7 +784,6 @@ struct dc_clock_config { #endif /*AMD_EDID_UTILITY*/ //AMD EDID UTILITY does not need any of the above structures -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC DPCD capabilities */ union dsc_slice_caps1 { struct { @@ -858,6 +853,5 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; }; -#endif #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index 7ba7e6f722f6..ba0caaffa24b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -67,7 +67,6 @@ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ABM_DCN20_REG_LIST() \ ABM_COMMON_REG_LIST_DCE_BASE(), \ SR(DC_ABM1_HG_SAMPLE_RATE), \ @@ -81,7 +80,6 @@ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \ NBIO_SR(BIOS_SCRATCH_2) -#endif #define ABM_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -163,9 +161,7 @@ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh) -#endif #define ABM_REG_FIELD_LIST(type) \ type ABM1_HG_NUM_OF_BINS_SEL; \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index 2e2e925a506b..382465862f29 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -30,7 +30,6 @@ #include "inc/hw/aux_engine.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define AUX_COMMON_REG_LIST0(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ SRI(AUX_ARB_CONTROL, DP_AUX, id), \ @@ -39,7 +38,6 @@ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ SRI(AUX_SW_STATUS, DP_AUX, id) -#endif #define AUX_COMMON_REG_LIST(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index f787a6b94781..898decadb8e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -1004,7 +1004,6 @@ static bool 
get_pixel_clk_frequency_100hz( return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */ struct pixel_rate_range_table_entry { @@ -1064,7 +1063,6 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = { .get_pix_clk_dividers = dce112_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; -#endif /*****************************************/ /* Constructor */ @@ -1435,7 +1433,6 @@ bool dce112_clk_src_construct( return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dcn20_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, @@ -1451,4 +1448,3 @@ bool dcn20_clk_src_construct( return ret; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 43c1bf60b83c..5b4a29ee1696 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -55,7 +55,6 @@ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define CS_COMMON_REG_LIST_DCN2_0(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRII(PHASE, DP_DTO, 0),\ @@ -76,7 +75,6 @@ SRII(PIXEL_RATE_CNTL, OTG, 3),\ SRII(PIXEL_RATE_CNTL, OTG, 4),\ SRII(PIXEL_RATE_CNTL, OTG, 5) -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \ @@ -95,13 +93,11 @@ SRII(PIXEL_RATE_CNTL, OTG, 3) #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\ CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh) -#endif #if defined(CONFIG_DRM_AMD_DC_DCN1_0) @@ -201,7 +197,6 @@ bool dce112_clk_src_construct( const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dcn20_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, @@ -210,6 +205,5 @@ bool dcn20_clk_src_construct( const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); -#endif #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 3276944e6997..4144b1055db2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -748,9 +748,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu) return true; } -#endif //(CONFIG_DRM_AMD_DC_DCN1_0) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static bool dcn20_lock_phy(struct dmcu *dmcu) { @@ -798,7 +796,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu) return true; } -#endif //(CONFIG_DRM_AMD_DC_DCN2_0) +#endif //(CONFIG_DRM_AMD_DC_DCN1_0) static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, @@ -822,9 +820,7 @@ static const struct dmcu_funcs dcn10_funcs = { .get_psr_wait_loop = dcn10_get_psr_wait_loop, .is_dmcu_initialized = dcn10_is_dmcu_initialized }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static const struct dmcu_funcs dcn20_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, @@ -837,7 +833,6 @@ static const struct dmcu_funcs dcn20_funcs = { .lock_phy = 
dcn20_lock_phy, .unlock_phy = dcn20_unlock_phy }; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) static const struct dmcu_funcs dcn21_funcs = { @@ -853,6 +848,7 @@ static const struct dmcu_funcs dcn21_funcs = { .unlock_phy = dcn20_unlock_phy }; #endif +#endif static void dce_dmcu_construct( struct dce_dmcu *dmcu_dce, @@ -872,7 +868,7 @@ static void dce_dmcu_construct( dmcu_dce->dmcu_mask = dmcu_mask; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) +#if defined(CONFIG_DRM_AMD_DC_DCN1_0) static void dcn21_dmcu_construct( struct dce_dmcu *dmcu_dce, struct dc_context *ctx, @@ -934,9 +930,7 @@ struct dmcu *dcn10_dmcu_create( return &dmcu_dce->base; } -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dmcu *dcn20_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -957,7 +951,6 @@ struct dmcu *dcn20_dmcu_create( return &dmcu_dce->base; } -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct dmcu *dcn21_dmcu_create( @@ -981,6 +974,7 @@ struct dmcu *dcn21_dmcu_create( return &dmcu_dce->base; } #endif +#endif void dce_dmcu_destroy(struct dmcu **dmcu) { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h index 1a42b2cbb21b..89277899b507 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h @@ -266,13 +266,11 @@ struct dmcu *dcn10_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dmcu *dcn20_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct dmcu *dcn21_dmcu_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 32d145a0d6fc..f31eea1bdec0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -210,7 +210,6 @@ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define HWSEQ_DCN2_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -276,7 +275,6 @@ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_REG_LIST()\ @@ -577,7 +575,6 @@ struct dce_hwseq_registers { HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -637,7 +634,6 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index aad7b52165be..1cd4d8fc361f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -296,9 +296,7 @@ static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t reset_length = 0; -#endif /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using 
it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); @@ -322,14 +320,12 @@ static bool setup_engine( REG_UPDATE_N(SETUP, 2, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) } else { reset_length = dce_i2c_hw->send_reset_length; REG_UPDATE_N(SETUP, 3, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#endif } /* Program HW priority * set to High - interrupt software I2C at any time @@ -705,7 +701,6 @@ void dcn1_i2c_hw_construct( dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn2_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, @@ -724,4 +719,3 @@ void dcn2_i2c_hw_construct( if (ctx->dc->debug.scl_reset_length10) dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index cb0234e5d597..d4b2037f7d74 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -177,9 +177,7 @@ struct dce_i2c_shift { uint8_t DC_I2C_INDEX; uint8_t DC_I2C_INDEX_WRITE; uint8_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif uint8_t DC_I2C_REG_RW_CNTL_STATUS; }; @@ -220,17 +218,13 @@ struct dce_i2c_mask { uint32_t DC_I2C_INDEX; uint32_t DC_I2C_INDEX_WRITE; uint32_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif uint32_t DC_I2C_REG_RW_CNTL_STATUS; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define I2C_COMMON_MASK_SH_LIST_DCN2(mask_sh)\ I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh),\ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH, mask_sh) -#endif struct dce_i2c_registers { uint32_t SETUP; @@ -312,7 +306,6 @@ void dcn1_i2c_hw_construct( const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn2_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, @@ -320,7 +313,6 @@ void dcn2_i2c_hw_construct( const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks); -#endif bool dce_i2c_submit_command_hw( struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 37f5bbcba155..811896a43b67 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1322,9 +1322,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct dc_stream_state *stream = pipe_ctx->stream; struct drr_params params = {0}; unsigned int event_triggers = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -#endif if (dc->hwss.disable_stream_gating) { dc->hwss.disable_stream_gating(dc, pipe_ctx); @@ -1390,7 +1388,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) while (odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion( odm_pipe->stream_res.opp, @@ -1404,7 +1401,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( &stream->clamping); odm_pipe = 
odm_pipe->next_odm_pipe; } -#endif if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 997e9582edc7..0e682b5aa3eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -290,12 +290,8 @@ void dpp1_cnv_setup ( enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut) -#else - enum dc_color_space input_color_space) -#endif { uint32_t pixel_format; uint32_t alpha_en; @@ -542,11 +538,9 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .dpp_program_blnd_lut = NULL, .dpp_program_shaper_lut = NULL, .dpp_program_3dlut = NULL -#endif }; static struct dpp_caps dcn10_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 1d4a7d640334..2edf566b3a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1486,12 +1486,8 @@ void dpp1_cnv_setup ( enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else - enum dc_color_space input_color_space); -#endif void dpp1_full_bypass(struct dpp *dpp_base); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 6f1a312c6a5a..6b7593dd0c77 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -736,10 +736,8 @@ void dpp1_full_bypass(struct dpp *dpp_base) /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ if (dpp->tf_mask->CM_BYPASS_EN) REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); -#endif /* Setting degamma bypass for now */ REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index d67e0abeee93..fce37c527a0b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -218,14 +218,12 @@ static void dpp1_dscl_set_lb( INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else { /* DSCL caps: pixel data processed in float format */ REG_SET_2(LB_DATA_FORMAT, 0, INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ } -#endif REG_SET_2(LB_MEMORY_CTRL, 0, MEMORY_CONFIG, mem_size_config, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c index 374cc9acda3b..64b31edc8cf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c @@ -109,9 +109,7 @@ const struct 
dwbc_funcs dcn10_dwbc_funcs = { .update = NULL, .set_stereo = NULL, .set_new_content = NULL, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .set_warmup = NULL, -#endif .dwb_set_scaler = NULL, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 5aeee938605a..31b64733d693 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -306,7 +306,6 @@ void hubp1_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 12); break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 112); @@ -327,7 +326,6 @@ void hubp1_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 119); break; -#endif default: BREAK_TO_DEBUGGER(); break; @@ -1251,10 +1249,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_get_underflow_status = hubp1_get_underflow_status, .hubp_init = hubp1_init, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .dmdata_set_attributes = NULL, .dmdata_load = NULL, -#endif }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index e65e76f018e4..780af5b3c16f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -729,13 +729,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable, enum hubp_ind_block_size independent_64b_blks); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hubp1_program_surface_flip_and_addr( struct hubp *hubp, const struct dc_plane_address *address, bool flip_immediate); -#endif bool hubp1_is_flip_pending(struct hubp *hubp); void hubp1_cursor_set_attributes( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 0a30d97b2656..fc07538f00ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -49,9 +49,7 @@ #include "clk_mgr.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif #define DC_LOGGER_INIT(logger) @@ -315,7 +313,6 @@ void dcn10_log_hw_state(struct dc *dc, /* Read shared OTG state registers for all DCNx */ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /* * For DCN2 and greater, a register on the OPP is used to * determine if the CRTC is blanked instead of the OTG. 
So use @@ -327,9 +324,6 @@ void dcn10_log_hw_state(struct dc *dc, s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]); else s.blank_enabled = tg->funcs->is_blanked(tg); -#else - s.blank_enabled = tg->funcs->is_blanked(tg); -#endif //only print if OTG master is enabled if ((s.otg_enabled & 1) == 0) @@ -364,7 +358,6 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n"); for (i = 0; i < pool->res_cap->num_dsc; i++) { struct display_stream_compressor *dsc = pool->dscs[i]; @@ -419,7 +412,6 @@ void dcn10_log_hw_state(struct dc *dc, } } DTN_INFO("\n"); -#endif DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n" "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n", @@ -1273,11 +1265,9 @@ static void dcn10_init_hw(struct dc *dc) } /* Power gate DSCs */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < res_pool->res_cap->num_dsc; i++) if (dc->hwss.dsc_pg_control != NULL) dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); -#endif /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -2190,12 +2180,8 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 plane_state->color_space, NULL); -#else - plane_state->color_space); -#endif //set scale and bias registers dcn10_build_prescale_params(&bns_params, plane_state); @@ -2649,11 +2635,9 @@ static void dcn10_apply_ctx_for_surface( if (num_planes > 0) program_all_pipe_in_tree(dc, top_pipe_to_program, context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* Program secondary blending tree and writeback pipes */ if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); -#endif if (interdependent_update) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c index 0fb9e440cb9d..f05371c1fc36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c @@ -53,11 +53,9 @@ static const struct ipp_funcs dcn10_ipp_funcs = { .ipp_destroy = dcn10_ipp_destroy }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static const struct ipp_funcs dcn20_ipp_funcs = { .ipp_destroy = dcn10_ipp_destroy }; -#endif void dcn10_ipp_construct( struct dcn10_ipp *ippn10, @@ -76,7 +74,6 @@ void dcn10_ipp_construct( ippn10->ipp_mask = ipp_mask; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn20_ipp_construct( struct dcn10_ipp *ippn10, struct dc_context *ctx, @@ -93,4 +90,3 @@ void dcn20_ipp_construct( ippn10->ipp_shift = ipp_shift; ippn10->ipp_mask = ipp_mask; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h index cfa24459242b..f0e0d07b0311 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h @@ -49,7 +49,6 @@ SRI(CURSOR_HOT_SPOT, CURSOR, id), \ SRI(CURSOR_DST_OFFSET, CURSOR, id) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define IPP_REG_LIST_DCN20(id) \ IPP_REG_LIST_DCN(id), \ SRI(CURSOR_SETTINGS, HUBPREQ, id), \ @@ -60,7 +59,6 @@ SRI(CURSOR_POSITION, 
CURSOR0_, id), \ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \ SRI(CURSOR_DST_OFFSET, CURSOR0_, id) -#endif #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L @@ -105,7 +103,6 @@ IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define IPP_MASK_SH_LIST_DCN20(mask_sh) \ IPP_MASK_SH_LIST_DCN(mask_sh), \ IPP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \ @@ -124,7 +121,6 @@ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh) -#endif #define IPP_DCN10_REG_FIELD_LIST(type) \ type CNVC_SURFACE_PIXEL_FORMAT; \ @@ -196,13 +192,11 @@ void dcn10_ipp_construct(struct dcn10_ipp *ippn10, const struct dcn10_ipp_shift *ipp_shift, const struct dcn10_ipp_mask *ipp_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn20_ipp_construct(struct dcn10_ipp *ippn10, struct dc_context *ctx, int inst, const struct dcn10_ipp_registers *regs, const struct dcn10_ipp_shift *ipp_shift, const struct dcn10_ipp_mask *ipp_mask); -#endif #endif /* _DCN10_IPP_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 88fcc395adf5..7493a630f4dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -72,9 +72,7 @@ struct dcn10_link_enc_aux_registers { uint32_t AUX_CONTROL; uint32_t AUX_DPHY_RX_CONTROL0; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t AUX_DPHY_TX_CONTROL; -#endif }; struct dcn10_link_enc_hpd_registers { @@ -106,7 +104,6 @@ struct dcn10_link_enc_registers { uint32_t DP_DPHY_HBR2_PATTERN_CONTROL; uint32_t DP_SEC_CNTL1; uint32_t TMDS_CTL_BITS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* DCCG */ uint32_t CLOCK_ENABLE; /* DIG */ @@ -136,7 +133,6 @@ struct dcn10_link_enc_registers { uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3; uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3; -#endif }; #define LE_SF(reg_name, field_name, post_fix)\ @@ -242,7 +238,6 @@ struct dcn10_link_enc_registers { type AUX_LS_READ_EN;\ type AUX_RX_RECEIVE_WINDOW -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DCN20_LINK_ENCODER_DPCS_REG_FIELD_LIST(type) \ type RDPCS_PHY_DP_TX0_DATA_EN;\ @@ -423,20 +418,15 @@ struct dcn10_link_enc_registers { type AUX_TX_PRECHARGE_SYMBOLS; \ type AUX_MODE_DET_CHECK_DELAY;\ type DPCS_DBG_CBUS_DIS -#endif struct dcn10_link_enc_shift { DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#endif }; struct dcn10_link_enc_mask { DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#endif }; struct dcn10_link_encoder { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 8b2f29f6dabd..220154f7911a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -457,12 +457,10 @@ static const struct mpc_funcs dcn10_mpc_funcs = { .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, .update_blending = mpc1_update_blending, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) 
.set_denorm = NULL, .set_denorm_clamp = NULL, .set_output_csc = NULL, .set_output_gamma = NULL, -#endif }; void dcn10_mpc_construct(struct dcn10_mpc *mpc10, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 33a3dcdb3fd1..d79718fde5a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -373,11 +373,9 @@ void opp1_program_oppbuf( */ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* Controls the number of padded pixels at the end of a segment */ if (REG(OPPBUF_CONTROL1)) REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels); -#endif } void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) @@ -404,10 +402,8 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, .opp_program_stereo = opp1_program_stereo, .opp_pipe_clock_control = opp1_pipe_clock_control, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .opp_set_disp_pattern_generator = NULL, .dpg_is_blanked = NULL, -#endif .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 30c025918568..cd7412dc42d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -1502,7 +1502,6 @@ void dcn10_timing_generator_init(struct optc *optc1) optc1->min_v_sync_width = 1; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this: * * - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as @@ -1515,15 +1514,12 @@ void dcn10_timing_generator_init(struct optc *optc1) * to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well. 
* */ -#endif bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 && !timing->dsc_cfg.ycbcr422_simple); -#endif return two_pix; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 4476bc8cdb4d..3afeb1a30f21 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -165,13 +165,11 @@ struct dcn_optc_registers { uint32_t OTG_CRC0_WINDOWB_X_CONTROL; uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; uint32_t GSL_SOURCE_SELECT; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t DWB_SOURCE_SELECT; uint32_t OTG_DSC_START_POSITION; uint32_t OPTC_DATA_FORMAT_CONTROL; uint32_t OPTC_BYTES_PER_PIXEL; uint32_t OPTC_WIDTH_CONTROL; -#endif }; #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -456,7 +454,6 @@ struct dcn_optc_registers { type MANUAL_FLOW_CONTROL;\ type MANUAL_FLOW_CONTROL_SEL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define TG_REG_FIELD_LIST(type) \ TG_REG_FIELD_LIST_DCN1_0(type)\ @@ -479,12 +476,6 @@ struct dcn_optc_registers { type OPTC_DWB0_SOURCE_SELECT;\ type OPTC_DWB1_SOURCE_SELECT; -#else - -#define TG_REG_FIELD_LIST(type) \ - TG_REG_FIELD_LIST_DCN1_0(type) - -#endif struct dcn_optc_shift { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 2f00f2389e40..f9b9e221c698 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -163,14 +163,12 @@ struct dcn10_stream_enc_registers { uint32_t DP_MSA_TIMING_PARAM3; uint32_t DP_MSA_TIMING_PARAM4; uint32_t HDMI_DB_CONTROL; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t DP_DSC_CNTL; uint32_t DP_DSC_BYTES_PER_PIXEL; uint32_t DME_CONTROL; uint32_t DP_SEC_METADATA_TRANSMISSION; uint32_t HDMI_METADATA_PACKET_CONTROL; uint32_t DP_SEC_FRAMING4; -#endif uint32_t DIG_CLOCK_PATTERN; }; @@ -466,7 +464,6 @@ struct dcn10_stream_enc_registers { type DIG_SOURCE_SELECT;\ type DIG_CLOCK_PATTERN -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define SE_REG_FIELD_LIST_DCN2_0(type) \ type DP_DSC_MODE;\ type DP_DSC_SLICE_WIDTH;\ @@ -485,20 +482,15 @@ struct dcn10_stream_enc_registers { type DOLBY_VISION_EN;\ type DP_PIXEL_COMBINE;\ type DP_SST_SDP_SPLITTING -#endif struct dcn10_stream_encoder_shift { SE_REG_FIELD_LIST_DCN1_0(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SE_REG_FIELD_LIST_DCN2_0(uint8_t); -#endif }; struct dcn10_stream_encoder_mask { SE_REG_FIELD_LIST_DCN1_0(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SE_REG_FIELD_LIST_DCN2_0(uint32_t); -#endif }; struct dcn10_stream_encoder { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index ddb8d5649e79..1eebaac81b36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -6,9 +6,7 @@ DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DCN20 += dcn20_dsc.o -endif ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) cc_stack_align := -mpreferred-stack-boundary=4 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index dc9944427d2f..0111545dac75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "reg_helper.h" #include "dcn20_dsc.h" #include "dsc/dscc_types.h" @@ -734,4 +733,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 4e2fb38390a4..9855a7ed0387 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -21,7 +21,6 @@ * Authors: AMD * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef __DCN20_DSC_H__ #define __DCN20_DSC_H__ @@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc, #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 69e2aae42394..f04325604f6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -483,7 +483,6 @@ void hubp2_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 12); break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 112); @@ -504,7 +503,6 @@ void hubp2_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 119); break; -#endif default: BREAK_TO_DEBUGGER(); break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 921a36668ced..0046a099c9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -33,9 +33,7 @@ #include "dcn10/dcn10_hw_sequencer.h" #include "dcn20_hwseq.h" #include "dce/dce_hwseq.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dcn20/dcn20_dsc.h" -#endif #include "abm.h" #include "clk_mgr.h" #include "dmcu.h" @@ -243,7 +241,6 @@ void dcn20_init_blank( dcn20_hwss_wait_for_blank_complete(opp); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void dcn20_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, @@ -320,7 +317,6 @@ static void dcn20_dsc_pg_control( if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } -#endif static void dcn20_dpp_pg_control( struct dce_hwseq *hws, @@ -1696,7 +1692,6 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dce_hwseq *hws = dc->hwseq; if (pipe_ctx->stream_res.dsc) { @@ -1708,12 +1703,10 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx odm_pipe = odm_pipe->next_odm_pipe; } } -#endif } static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dce_hwseq *hws = dc->hwseq; if (pipe_ctx->stream_res.dsc) { @@ -1725,7 +1718,6 @@ static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) odm_pipe = odm_pipe->next_odm_pipe; } } -#endif } void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) @@ -1924,11 +1916,9 @@ static void dcn20_reset_back_end_for_pipe( } } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT else if (pipe_ctx->stream_res.dsc) { dp_set_dsc_enable(pipe_ctx, false); } -#endif /* by 
upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable @@ -2440,11 +2430,7 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane; dc->hwss.dpp_pg_control = dcn20_dpp_pg_control; dc->hwss.hubp_pg_control = dcn20_hubp_pg_control; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT dc->hwss.dsc_pg_control = dcn20_dsc_pg_control; -#else - dc->hwss.dsc_pg_control = NULL; -#endif dc->hwss.disable_vga = dcn20_disable_vga; if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index 0e0306d84cd8..e4ac73035c84 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = { void enc2_fec_set_enable(struct link_encoder *enc, bool enable) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_LOG_DSC("%s FEC at link encoder inst %d", enable ? "Enabling" : "Disabling", enc->id.enum_id); -#endif REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable); } @@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc) return (active != 0); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* this function reads dsc related register fields to be logged later in dcn10_log_hw_state * into a dcn_dsc_state struct. */ @@ -205,7 +202,6 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s) REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status); REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete); } -#endif static bool update_cfg_data( struct dcn10_link_encoder *enc10, @@ -316,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc) } static const struct link_encoder_funcs dcn20_link_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .read_state = link_enc2_read_state, -#endif .validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream, .hw_init = enc2_hw_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 0c98a0bbbd14..62dfd34c69f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -158,9 +158,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready); bool enc2_fec_is_active(struct link_encoder *enc); void enc2_hw_init(struct link_encoder *enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s); -#endif void dcn20_link_encoder_enable_dp_output( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 3b613fb93ef8..0e50dc9b611a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -167,7 +167,6 @@ void optc2_set_gsl_source_select( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */ void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc, int x_position, @@ -201,7 +200,6 @@ void optc2_set_dsc_config(struct timing_generator *optc, REG_UPDATE(OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, dsc_slice_width); } -#endif /** * 
PTI i think is already done somewhere else for 2ka @@ -448,9 +446,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, .configure_crc = optc1_configure_crc, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .set_dsc_config = optc2_set_dsc_config, -#endif .set_dwb_source = optc2_set_dwb_source, .set_odm_bypass = optc2_set_odm_bypass, .set_odm_combine = optc2_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index 32a58431fd09..9ae22146d2d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -86,12 +86,10 @@ void optc2_set_gsl_source_select(struct timing_generator *optc, int group_idx, uint32_t gsl_ready_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void optc2_set_dsc_config(struct timing_generator *optc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); -#endif void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d3d26796a9b8..d246d94e9b51 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -45,9 +45,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn20_opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dcn20_dsc.h" -#endif #include "dcn20_link_encoder.h" #include "dcn20_stream_encoder.h" @@ -95,11 +93,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 0, .pte_group_size_bytes = 2048, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, -#else - .num_dsc = 0, -#endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 84, @@ -771,7 +765,6 @@ static int map_transmitter_id_to_phy_instance( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ @@ -793,7 +786,6 @@ static const struct dcn20_dsc_shift dsc_shift = { static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; -#endif static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN2() @@ -817,9 +809,7 @@ static const struct resource_caps res_cap_nv10 = { .num_dwb = 1, .num_ddc = 6, .num_vmid = 16, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, -#endif }; static const struct dc_plane_cap plane_cap = { @@ -1213,7 +1203,6 @@ void dcn20_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst) @@ -1236,7 +1225,6 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc) *dsc = NULL; } -#endif static void destruct(struct dcn20_resource_pool *pool) { @@ -1249,12 +1237,10 @@ static void destruct(struct dcn20_resource_pool *pool) } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } -#endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -1465,7 +1451,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state return status; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool 
*pool, @@ -1499,10 +1484,8 @@ static void release_dsc(struct resource_context *res_ctx, } } -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static enum dc_status add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream) @@ -1554,7 +1537,6 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, else return DC_OK; } -#endif enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) @@ -1566,11 +1548,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Get a DSC if required and available */ if (result == DC_OK && dc_stream->timing.flags.DSC) result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream); -#endif if (result == DC_OK) result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); @@ -1583,9 +1563,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ { enum dc_status result = DC_OK; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); -#endif return result; } @@ -1668,9 +1646,7 @@ bool dcn20_split_stream_for_odm( next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT next_odm_pipe->stream_res.dsc = NULL; -#endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; @@ -1716,14 +1692,12 @@ bool dcn20_split_stream_for_odm( sd->recout.x = 0; } next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (next_odm_pipe->stream->timing.flags.DSC == 1) { acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; } -#endif return true; } @@ -1747,9 +1721,7 @@ void dcn20_split_stream_for_mpc( secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT secondary_pipe->stream_res.dsc = NULL; -#endif if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) { ASSERT(!secondary_pipe->bottom_pipe); secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; @@ -1833,11 +1805,9 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.dcc = 0; pipes[pipe_cnt].pipe.src.vm = 0;*/ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; /* todo: rotation?*/ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; -#endif if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; /* 1/2 vblank */ @@ -1925,14 +1895,12 @@ int dcn20_populate_dml_pipes_from_context( case COLOR_DEPTH_161616: output_bpc = 16; break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case COLOR_DEPTH_999: output_bpc = 9; break; case COLOR_DEPTH_111111: output_bpc = 11; break; -#endif default: output_bpc = 8; break; @@ 
-1960,10 +1928,8 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; -#endif /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; @@ -2185,7 +2151,6 @@ void dcn20_set_mcif_arb_params( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) { int i; @@ -2219,7 +2184,6 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) } return true; } -#endif struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, @@ -2322,10 +2286,8 @@ void dcn20_merge_pipes_for_validate( odm_pipe->bottom_pipe = NULL; odm_pipe->prev_odm_pipe = NULL; odm_pipe->next_odm_pipe = NULL; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (odm_pipe->stream_res.dsc) release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); -#endif /* Clear plane_res and stream_res */ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); @@ -2545,14 +2507,12 @@ bool dcn20_fast_validate_bw( ASSERT(0); } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; } -#endif *vlevel_out = vlevel; @@ -3654,7 +3614,6 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn20_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { @@ -3663,7 +3622,6 @@ static bool construct( goto create_fail; } } -#endif if (!dcn20_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index fef473d68a4a..fa00989584dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -127,9 +127,7 @@ int dcn20_validate_apply_pipe_split_flags( struct dc_state *context, int vlevel, bool *split); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); -#endif void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 6c2d82ce43ab..33cc40fb9687 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -205,7 +205,6 @@ static void enc2_stream_encoder_stop_hdmi_info_packets( HDMI_GENERIC7_LINE, 0); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Update GSP7 SDP 128 byte long */ static void enc2_update_gsp7_128_info_packet( @@ -360,7 +359,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s) REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable); } } -#endif /* Set Dynamic Metadata-configuration. 
* enable_dme: TRUE: enables Dynamic Metadata Enfine, FALSE: disables DME @@ -440,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 && !timing->dsc_cfg.ycbcr422_simple); -#endif return two_pix; } @@ -587,11 +583,9 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .enc_read_state = enc2_read_state, .dp_set_dsc_config = enc2_dp_set_dsc_config, .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet, -#endif .set_dynamic_metadata = enc2_set_dynamic_metadata, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index e8a504ca5890..e45683ac871a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -323,9 +323,7 @@ void dcn21_link_encoder_disable_output( static const struct link_encoder_funcs dcn21_link_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .read_state = link_enc2_read_state, -#endif .validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream, .hw_init = enc2_hw_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 44dc1d15c334..0f6e2a08b663 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -88,11 +88,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { .gpuvm_max_page_table_levels = 1, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 2, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, -#else - .num_dsc = 0, -#endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 44, @@ -538,7 +534,6 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ @@ -560,7 +555,6 @@ static const struct dcn20_dsc_shift dsc_shift = { static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; -#endif #define ipp_regs(id)\ [id] = {\ @@ -757,9 +751,7 @@ static const struct resource_caps res_cap_rn = { .num_dwb = 1, .num_ddc = 5, .num_vmid = 1, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, -#endif }; #ifdef DIAGS_BUILD @@ -784,9 +776,7 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { .num_pll = 4, .num_dwb = 1, .num_ddc = 4, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 2, -#endif }; #endif @@ -865,12 +855,10 @@ static void destruct(struct dcn21_resource_pool *pool) } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } -#endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -1299,7 +1287,6 @@ static void read_dce_straps( } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dcn21_dsc_create( struct dc_context *ctx, uint32_t inst) @@ -1315,7 +1302,6 @@ struct display_stream_compressor *dcn21_dsc_create( dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], 
&dsc_shift, &dsc_mask); return &dsc->base; } -#endif static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { @@ -1853,7 +1839,6 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn21_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { @@ -1862,7 +1847,6 @@ static bool construct( goto create_fail; } } -#endif if (!dcn20_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 94b75e942607..8bde1d688f2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -118,13 +118,11 @@ bool dm_helpers_submit_i2c( const struct dc_link *link, struct i2c_command *cmd); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, bool enable ); -#endif bool dm_helpers_is_dp_sink_present( struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index ef7df9ef6d7e..dc9dbbc51d1c 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -41,9 +41,7 @@ enum pp_smu_ver { */ PP_SMU_UNSUPPORTED, PP_SMU_VER_RV, -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 PP_SMU_VER_NV, -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) PP_SMU_VER_RN, #endif @@ -143,7 +141,6 @@ struct pp_smu_funcs_rv { void (*set_pme_wa_enable)(struct pp_smu *pp); }; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 /* Used by pp_smu_funcs_nv.set_voltage_by_freq * */ @@ -247,7 +244,6 @@ struct pp_smu_funcs_nv { enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp, BOOLEAN pstate_handshake_supported); }; -#endif #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8 #define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8 @@ -291,9 +287,7 @@ struct pp_smu_funcs { struct pp_smu ctx; union { struct pp_smu_funcs_rv rv_funcs; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 struct pp_smu_funcs_nv nv_funcs; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct pp_smu_funcs_rn rn_funcs; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 5b2a65b42403..be9815b5f1e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -38,7 +38,7 @@ endif CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN1_0 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) @@ -56,7 +56,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ dml_common_defs.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN1_0 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index ba77957aefe3..945291d5ad98 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -23,7 +23,6 @@ 
* */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "../display_mode_lib.h" #include "../dml_inline_defs.h" @@ -6126,4 +6125,3 @@ static double CalculateExtraLatency( return CalculateExtraLatency; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a1f207cbb966..a4b103eb4b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "../display_mode_lib.h" #include "../display_mode_vba.h" @@ -1820,4 +1819,3 @@ static void calculate_ttu_cursor( } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 1c97083b8d0b..55d4cb23a073 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -135,9 +135,7 @@ enum dm_validation_status { DML_FAIL_DIO_SUPPORT, DML_FAIL_NOT_ENOUGH_DSC, DML_FAIL_DSC_CLK_REQUIRED, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DML_FAIL_DSC_VALIDATION_FAILURE, -#endif DML_FAIL_URGENT_LATENCY, DML_FAIL_REORDERING_BUFFER, DML_FAIL_DISPCLK_DPPCLK, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 704efefdcba8..9c6016e57d2b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -25,18 +25,15 @@ #include "display_mode_lib.h" #include "dc_features.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/display_mode_vba_20.h" #include "dcn20/display_rq_dlg_calc_20.h" #include "dcn20/display_mode_vba_20v2.h" #include "dcn20/display_rq_dlg_calc_20v2.h" -#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_1 #include "dcn21/display_mode_vba_21.h" #include "dcn21/display_rq_dlg_calc_21.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) const struct dml_funcs dml20_funcs = { .validate = dml20_ModeSupportAndSystemConfigurationFull, .recalculate = dml20_recalculate, @@ -50,7 +47,6 @@ const struct dml_funcs dml20v2_funcs = { .rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg, .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg }; -#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_1 const struct dml_funcs dml21_funcs = { @@ -70,14 +66,12 @@ void dml_init_instance(struct display_mode_lib *lib, lib->ip = *ip_params; lib->project = project; switch (project) { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case DML_PROJECT_NAVI10: lib->funcs = dml20_funcs; break; case DML_PROJECT_NAVI10v2: lib->funcs = dml20v2_funcs; break; -#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_1 case DML_PROJECT_DCN21: lib->funcs = dml21_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index d8c59aa356b6..212188be1ec1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -27,17 +27,13 @@ #include "dml_common_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "display_mode_vba.h" -#endif enum dml_project { DML_PROJECT_UNDEFINED, DML_PROJECT_RAVEN1, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 DML_PROJECT_NAVI10, DML_PROJECT_NAVI10v2, -#endif #ifdef CONFIG_DRM_AMD_DC_DCN2_1 DML_PROJECT_DCN21, #endif @@ -70,9 +66,7 @@ struct display_mode_lib { struct _vcs_dpi_ip_params_st ip; struct _vcs_dpi_soc_bounding_box_st soc; enum dml_project project; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 
struct vba_vars_st vba; -#endif struct dal_logger *logger; struct dml_funcs funcs; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 19356180cbb6..516396d53d01 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -146,7 +146,6 @@ struct _vcs_dpi_ip_params_st { unsigned int writeback_interface_buffer_size_kbytes; unsigned int writeback_line_buffer_buffer_size; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 unsigned int writeback_10bpc420_supported; double writeback_max_hscl_ratio; double writeback_max_vscl_ratio; @@ -156,7 +155,6 @@ struct _vcs_dpi_ip_params_st { unsigned int writeback_max_vscl_taps; unsigned int writeback_line_buffer_luma_buffer_size; unsigned int writeback_line_buffer_chroma_buffer_size; -#endif unsigned int max_page_table_levels; unsigned int max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index da5e9d2fd6b6..b1c2b79e42b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "display_mode_lib.h" #include "display_mode_vba.h" @@ -862,4 +861,3 @@ double CalculateWriteBackDISPCLK( return CalculateWriteBackDISPCLK; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 6d8b5c61de68..3eb657ed5714 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #ifndef __DML2_DISPLAY_MODE_VBA_H__ #define __DML2_DISPLAY_MODE_VBA_H__ @@ -872,4 +871,3 @@ double CalculateWriteBackDISPCLK( unsigned int WritebackChromaLineBufferWidth); #endif /* _DML2_DISPLAY_MODE_VBA_H_ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index e60f760585e4..dabd3b7a4cdc 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -22,7 +22,6 @@ * Author: AMD */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dc_hw_types.h" #include "dsc.h" #include @@ -903,4 +902,3 @@ bool dc_dsc_compute_config( timing, dsc_min_slice_height_override, dsc_cfg); return is_dsc_possible; } -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h index 020ad8f685ea..9f70e87b3ecb 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -51,4 +50,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h index f66d006eac5d..e5fac9f4181d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. 
@@ -703,4 +702,3 @@ const qp_table qp_table_422_8bpc_max = { { 16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} } }; -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 76c4b12d6824..03ae15946c6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -252,4 +251,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->rc_buf_thresh[13] = 8064; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index f1d6e793bc61..b6b1f09c2009 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -82,4 +81,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 73172fd0b529..1f6e63b71456 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /* * Copyright 2012-17 Advanced Micro Devices, Inc. * @@ -144,4 +143,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par return ret; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index b3062275711e..7791cd29fc18 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -67,12 +67,10 @@ GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10) -endif ############################################################################### # DCN 2 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0 GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20)) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 2664cb22dfe7..83f798cb8b21 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dm_services.h" #include "include/gpio_types.h" #include "../hw_factory.h" @@ -258,4 +257,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory) factory->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h index 43a4ce7aa3bf..0fd9b315bd7a 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifndef __DAL_HW_FACTORY_DCN20_H__ #define __DAL_HW_FACTORY_DCN20_H__ @@ -30,4 +29,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory); #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git 
a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c index 915e896e0e91..52ba62b3b5e4 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c @@ -26,7 +26,6 @@ /* * Pre-requisites: headers required by header of this unit */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "hw_translate_dcn20.h" #include "dm_services.h" @@ -379,4 +378,3 @@ void dal_hw_translate_dcn20_init(struct hw_translate *tr) tr->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h index 01f52c7bed86..5f7a35530e26 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifndef __DAL_HW_TRANSLATE_DCN20_H__ #define __DAL_HW_TRANSLATE_DCN20_H__ @@ -32,4 +31,3 @@ struct hw_translate; void dal_hw_translate_dcn20_init(struct hw_translate *tr); #endif /* __DAL_HW_TRANSLATE_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c index 8572678f8d4f..907c5911eb9e 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dm_services.h" #include "include/gpio_types.h" #include "../hw_factory.h" @@ -239,4 +238,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory) factory->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index fbb58fb8c318..291966efe63d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -26,7 +26,6 @@ /* * Pre-requisites: headers required by header of this unit */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "hw_translate_dcn21.h" #include "dm_services.h" @@ -382,4 +381,3 @@ void dal_hw_translate_dcn21_init(struct hw_translate *tr) tr->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index f91e85b04956..308a543178a5 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -48,13 +48,11 @@ DDC_GPIO_REG_LIST(cd,id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_REG_LIST_DCN2(cd, id) \ DDC_GPIO_REG_LIST(cd, id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP),\ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif #define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\ @@ -90,13 +88,11 @@ DDC_GPIO_I2C_REG_LIST(cd),\ .ddc_setup = 0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_I2C_REG_LIST_DCN2(cd) \ DDC_GPIO_I2C_REG_LIST(cd),\ .ddc_setup = 0,\ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif #define DDC_MASK_SH_LIST_COMMON(mask_sh) \ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\ @@ -110,22 +106,18 @@ 
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_MASK_SH_LIST_DCN2(mask_sh, cd) \ {DDC_MASK_SH_LIST_COMMON(mask_sh),\ 0,\ 0,\ (PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\ (DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)} -#endif struct ddc_registers { struct gpio_registers gpio; uint32_t ddc_setup; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t phy_aux_cntl; uint32_t dc_gpio_aux_ctrl_5; -#endif }; struct ddc_sh_mask { @@ -140,11 +132,9 @@ struct ddc_sh_mask { /* i2cpad_mask */ uint32_t DC_GPIO_SDA_PD_DIS; uint32_t DC_GPIO_SCL_PD_DIS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) //phy_aux_cntl uint32_t AUX_PAD_RXSEL; uint32_t DDC_PAD_I2CMODE; -#endif }; @@ -180,7 +170,6 @@ struct ddc_sh_mask { {\ DDC_I2C_REG_LIST(SCL)\ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ddc_data_regs_dcn2(id) \ {\ DDC_REG_LIST_DCN2(DATA, id)\ @@ -200,7 +189,6 @@ struct ddc_sh_mask { {\ DDC_REG_LIST_DCN2(SCL)\ } -#endif #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index 1c12961f6472..a9aee1332330 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -150,7 +150,6 @@ static enum gpio_result set_config( AUX_PAD1_MODE, 0); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) { REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1); } @@ -158,7 +157,6 @@ static enum gpio_result set_config( if (ddc->regs->phy_aux_cntl != 0) { REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1); } -#endif return GPIO_RESULT_OK; case GPIO_DDC_CONFIG_TYPE_MODE_AUX: /* set the AUX pad mode */ @@ -166,12 +164,10 @@ static enum gpio_result set_config( REG_SET(gpio.MASK_reg, regval, AUX_PAD1_MODE, 1); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) { REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 0); } -#endif return GPIO_RESULT_OK; case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index fa9f1d055ec8..edd70292cf86 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -48,9 +48,7 @@ #if defined(CONFIG_DRM_AMD_DC_DCN1_0) #include "dcn10/hw_factory_dcn10.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/hw_factory_dcn20.h" -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_factory_dcn21.h" #endif @@ -95,17 +93,15 @@ bool dal_hw_factory_init( case DCN_VERSION_1_01: dal_hw_factory_dcn10_init(factory); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: dal_hw_factory_dcn20_init(factory); return true; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_factory_dcn21_init(factory); return true; +#endif #endif default: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index f2046f55d6a8..8e10bff4c074 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -46,9 +46,7 @@ #if defined(CONFIG_DRM_AMD_DC_DCN1_0) #include "dcn10/hw_translate_dcn10.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/hw_translate_dcn20.h" -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_translate_dcn21.h" #endif @@ 
-90,17 +88,15 @@ bool dal_hw_translate_init( case DCN_VERSION_1_01: dal_hw_translate_dcn10_init(translate); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: dal_hw_translate_dcn20_init(translate); return true; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_translate_dcn21_init(translate); return true; +#endif #endif default: diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index fd39e2abe2ed..4ead89dd7c41 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -43,10 +43,8 @@ enum dc_status { DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */ DC_FAIL_SCALING = 14, DC_FAIL_DP_LINK_TRAINING = 15, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 DC_FAIL_DSC_VALIDATE = 16, DC_NO_DSC_RESOURCE = 17, -#endif DC_FAIL_UNSUPPORTED_1 = 18, DC_FAIL_CLK_EXCEED_MAX = 21, DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index fc9decc0a8fc..4e00c26c4eea 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -36,10 +36,8 @@ #if defined(CONFIG_DRM_AMD_DC_DCN1_0) #include "mpc.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dwb.h" #include "mcif_wb.h" -#endif #define MAX_CLOCK_SOURCES 7 @@ -135,7 +133,6 @@ struct resource_funcs { struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*populate_dml_writeback_from_context)( struct dc *dc, struct resource_context *res_ctx, @@ -146,7 +143,6 @@ struct resource_funcs { struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) void (*update_bw_bounding_box)( struct dc *dc, @@ -180,7 +176,6 @@ struct resource_pool { struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; bool i2c_hw_buffer_in_use; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dwbc *dwbc[MAX_DWB_PIPES]; struct mcif_wb *mcif_wb[MAX_DWB_PIPES]; struct { @@ -188,11 +183,8 @@ struct resource_pool { unsigned int gsl_1:1; unsigned int gsl_2:1; } gsl_groups; -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dscs[MAX_PIPES]; -#endif unsigned int pipe_count; unsigned int underlay_pipe_index; @@ -206,9 +198,7 @@ struct resource_pool { unsigned int timing_generator_count; unsigned int mpcc_count; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) unsigned int writeback_pipe_count; -#endif /* * reserved clock source for DP */ @@ -240,9 +230,7 @@ struct dcn_fe_bandwidth { struct stream_resource { struct output_pixel_processor *opp; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dsc; -#endif struct timing_generator *tg; struct stream_encoder *stream_enc; struct audio *audio; @@ -251,12 +239,10 @@ struct stream_resource { struct encoder_info_frame encoder_info_frame; struct abm *abm; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* There are only (num_pipes+1)/2 groups. 
0 means unassigned, * otherwise it's using group number 'gsl_group-1' */ uint8_t gsl_group; -#endif }; struct plane_resource { @@ -315,10 +301,8 @@ struct pipe_ctx { struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param; #endif union pipe_update_flags update_flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dwbc *dwbc; struct mcif_wb *mcif_wb; -#endif }; struct resource_context { @@ -327,9 +311,7 @@ struct resource_context { bool is_audio_acquired[MAX_PIPES]; uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES]; uint8_t dp_clock_source_ref_count; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool is_dsc_acquired[MAX_PIPES]; -#endif }; struct dce_bw_output { @@ -349,18 +331,14 @@ struct dce_bw_output { int blackout_recovery_time_us; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dcn_bw_writeback { struct mcif_arb_params mcif_wb_arb[MAX_DWB_PIPES]; }; -#endif struct dcn_bw_output { struct dc_clocks clk; struct dcn_watermark_set watermarks; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dcn_bw_writeback bw_writeback; -#endif }; union bw_output { diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 1e6ff6eb5bfc..4879cf54d8f1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -75,13 +75,11 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable); enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); -#endif #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index a17a77192690..862952c0286a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -96,12 +96,10 @@ enum dentist_divider_range { .MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \ .MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67 -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK_REG_LIST_NV10() \ SR(DENTIST_DISPCLK_CNTL), \ CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \ CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0) -#endif #define CLK_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -120,7 +118,6 @@ enum dentist_divider_range { CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\ CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh), -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh) \ CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\ @@ -130,7 +127,6 @@ enum dentist_divider_range { CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\ CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\ CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh) -#endif #define CLK_REG_FIELD_LIST(type) \ type DPREFCLK_SRC_SEL; \ @@ -143,30 +139,24 @@ enum dentist_divider_range { ****************** Clock Manager Private Structures *********************************** *************************************************************************************** */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 
#define CLK20_REG_FIELD_LIST(type) \ type DENTIST_DPPCLK_WDIVIDER; \ type DENTIST_DPPCLK_CHG_DONE; \ type FbMult_int; \ type FbMult_frac; -#endif #define VBIOS_SMU_REG_FIELD_LIST(type) \ type CONTENT; struct clk_mgr_shift { CLK_REG_FIELD_LIST(uint8_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 CLK20_REG_FIELD_LIST(uint8_t) -#endif VBIOS_SMU_REG_FIELD_LIST(uint32_t) }; struct clk_mgr_mask { CLK_REG_FIELD_LIST(uint32_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 CLK20_REG_FIELD_LIST(uint32_t) -#endif VBIOS_SMU_REG_FIELD_LIST(uint32_t) }; @@ -174,10 +164,8 @@ struct clk_mgr_registers { uint32_t DPREFCLK_CNTL; uint32_t DENTIST_DISPCLK_CNTL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t CLK3_CLK2_DFS_CNTL; uint32_t CLK3_CLK_PLL_REQ; -#endif uint32_t MP1_SMN_C2PMSG_67; uint32_t MP1_SMN_C2PMSG_83; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index c81a17aeaa25..c0dc1d0f5cae 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -52,7 +52,6 @@ struct dcn_hubbub_wm { struct dcn_hubbub_wm_set sets[4]; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dcn_hubbub_page_table_depth { DCN_PAGE_TABLE_DEPTH_1_LEVEL, DCN_PAGE_TABLE_DEPTH_2_LEVEL, @@ -101,13 +100,11 @@ struct hubbub_addr_config { } default_addrs; }; -#endif struct hubbub_funcs { void (*update_dchub)( struct hubbub *hubbub, struct dchub_init_data *dh_data); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int (*init_dchub_sys_ctx)( struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); @@ -116,7 +113,6 @@ struct hubbub_funcs { struct dcn_hubbub_virt_addr_config *va_config, int vmid); -#endif bool (*get_dcc_compression_cap)(struct hubbub *hubbub, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 474c7194a9f8..125e42dbd3c5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -36,14 +36,10 @@ struct dpp { struct dpp_caps *caps; struct pwl_params regamma_params; struct pwl_params degamma_params; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dpp_cursor_attributes cur_attr; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params shaper_params; bool cm_bypass_mode; -#endif }; struct dpp_input_csc_matrix { @@ -56,7 +52,6 @@ struct dpp_grph_csc_adjustment { enum graphics_gamut_adjust_type gamut_adjust_type; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct cnv_color_keyer_params { int color_keyer_en; int color_keyer_mode; @@ -82,7 +77,6 @@ struct cnv_alpha_2bit_lut { int lut2; int lut3; }; -#endif struct dcn_dpp_state { uint32_t is_enabled; @@ -190,12 +184,8 @@ struct dpp_funcs { enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else - enum dc_color_space input_color_space); -#endif void (*dpp_full_bypass)(struct dpp *dpp_base); @@ -224,7 +214,6 @@ struct dpp_funcs { bool dppclk_div, bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool (*dpp_program_blnd_lut)( struct dpp *dpp, const struct pwl_params *params); @@ -237,7 +226,6 @@ struct dpp_funcs { void (*dpp_cnv_set_alpha_keyer)( struct dpp *dpp_base, struct cnv_color_keyer_params *color_keyer); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 
c6ff3d78b435..c59740084ebc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef __DAL_DSC_H__ #define __DAL_DSC_H__ @@ -98,4 +97,3 @@ struct dsc_funcs { }; #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index ff1a07b35c85..aed67754e81b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -51,11 +51,7 @@ enum dwb_source { dwb_src_otg3, /* for DCN1.x/DCN2.x */ }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* DCN1.x, DCN2.x support 2 pipes */ -#else -/* DCN1.x supports 2 pipes */ -#endif enum dwb_pipe { dwb_pipe0 = 0, #if defined(CONFIG_DRM_AMD_DC_DCN1_0) @@ -64,7 +60,6 @@ enum dwb_pipe { dwb_pipe_max_num, }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum dwb_frame_capture_enable { DWB_FRAME_CAPTURE_DISABLE = 0, DWB_FRAME_CAPTURE_ENABLE = 1, @@ -77,9 +72,7 @@ enum wbscl_coef_filter_type_sel { WBSCL_COEF_CHROMA_HORZ_FILTER = 3 }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dwb_warmup_params { bool warmup_en; /* false: normal mode, true: enable pattern generator */ bool warmup_mode; /* false: 420, true: 444 */ @@ -88,7 +81,6 @@ struct dwb_warmup_params { int warmup_width; /* Pattern width (pixels) */ int warmup_height; /* Pattern height (lines) */ }; -#endif struct dwb_caps { enum dce_version hw_version; /* DCN engine version. */ @@ -150,13 +142,11 @@ struct dwbc_funcs { struct dwbc *dwbc, bool is_new_content); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_warmup)( struct dwbc *dwbc, struct dwb_warmup_params *warmup_params); -#endif bool (*get_dwb_status)( struct dwbc *dwbc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 809b62b51a43..62b2d24cd1d3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -38,9 +38,7 @@ enum cursor_pitch { }; enum cursor_lines_per_chunk { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) CURSOR_LINE_PER_CHUNK_1 = 0, /* new for DCN2 */ -#endif CURSOR_LINE_PER_CHUNK_2 = 1, CURSOR_LINE_PER_CHUNK_4, CURSOR_LINE_PER_CHUNK_8, @@ -139,7 +137,6 @@ struct hubp_funcs { unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); void (*hubp_init)(struct hubp *hubp); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*dmdata_set_attributes)( struct hubp *hubp, const struct dc_dmdata_attributes *attr); @@ -159,7 +156,6 @@ struct hubp_funcs { void (*hubp_set_flip_control_surface_gsl)( struct hubp *hubp, bool enable); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index f82365e2d03c..99ae8181d429 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -36,9 +36,7 @@ #define MAX_AUDIOS 7 #define MAX_PIPES 6 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB_PIPES 1 -#endif struct gamma_curve { uint32_t offset; @@ -81,7 +79,6 @@ struct pwl_result_data { uint32_t delta_blue_reg; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_rgb { uint32_t red; uint32_t green; @@ -110,7 +107,6 @@ struct tetrahedral_params { bool use_12bits; }; -#endif /* arr_curve_points - regamma regions/segments specification * arr_points - beginning and end point specified separately (only one on DCE) @@ -195,13 +191,11 @@ enum opp_regamma { OPP_REGAMMA_USER }; -#ifdef 
CONFIG_DRM_AMD_DC_DSC_SUPPORT enum optc_dsc_mode { OPTC_DSC_DISABLED = 0, OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */ OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */ }; -#endif struct dc_bias_and_scale { uint16_t scale_red; @@ -224,12 +218,8 @@ enum test_pattern_mode { TEST_PATTERN_MODE_VERTICALBARS, TEST_PATTERN_MODE_HORIZONTALBARS, TEST_PATTERN_MODE_SINGLERAMP_RGB, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) TEST_PATTERN_MODE_DUALRAMP_RGB, TEST_PATTERN_MODE_XR_BIAS_RGB -#else - TEST_PATTERN_MODE_DUALRAMP_RGB -#endif }; enum test_pattern_color_format { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index af57751ed8a1..fb748f082c56 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -113,12 +113,9 @@ struct link_encoder { struct encoder_feature_support features; enum transmitter transmitter; enum hpd_source_id hpd_source; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool usbc_combo_phy; -#endif }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct link_enc_state { uint32_t dphy_fec_en; @@ -127,13 +124,10 @@ struct link_enc_state { uint32_t dp_link_training_complete; }; -#endif struct link_encoder_funcs { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*read_state)( struct link_encoder *enc, struct link_enc_state *s); -#endif bool (*validate_output_with_stream)( struct link_encoder *enc, const struct dc_stream_state *stream); void (*hw_init)(struct link_encoder *enc); @@ -175,7 +169,6 @@ struct link_encoder_funcs { unsigned int (*get_dig_frontend)(struct link_encoder *enc); void (*destroy)(struct link_encoder **enc); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*fec_set_enable)(struct link_encoder *enc, bool enable); @@ -183,7 +176,6 @@ struct link_encoder_funcs { bool ready); bool (*fec_is_active)(struct link_encoder *enc); -#endif bool (*is_in_alt_mode) (struct link_encoder *enc); void (*get_max_link_cap)(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 58826be81395..094afc4c8173 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -31,9 +31,7 @@ #define MAX_MPCC 6 #define MAX_OPP 6 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB 1 -#endif enum mpc_output_csc_mode { MPC_OUTPUT_CSC_DISABLE = 0, @@ -66,14 +64,12 @@ struct mpcc_blnd_cfg { int global_alpha; bool overlap_only; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* MPCC top/bottom gain settings */ int bottom_gain_mode; int background_color_bpc; int top_gain; int bottom_inside_gain; int bottom_outside_gain; -#endif }; struct mpcc_sm_cfg { @@ -90,7 +86,6 @@ struct mpcc_sm_cfg { int force_next_field_polarity; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct mpc_denorm_clamp { int clamp_max_r_cr; int clamp_min_r_cr; @@ -99,7 +94,6 @@ struct mpc_denorm_clamp { int clamp_max_b_cb; int clamp_min_b_cb; }; -#endif /* * MPCC connection and blending configuration for a single MPCC instance. 
@@ -126,10 +120,8 @@ struct mpc { struct dc_context *ctx; struct mpcc mpcc_array[MAX_MPCC]; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params blender_params; bool cm_bypass_mode; -#endif }; struct mpcc_state { @@ -230,7 +222,6 @@ struct mpc_funcs { struct mpc *mpc, struct mpc_tree *tree); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_denorm)(struct mpc *mpc, int opp_id, enum dc_color_depth output_depth); @@ -258,7 +249,6 @@ struct mpc_funcs { struct mpc *mpc, int mpcc_id, bool power_on); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 18def2b6fafe..e2d960e5fc1c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -263,9 +263,7 @@ struct oppbuf_params { enum oppbuf_display_segmentation mso_segmentation; uint32_t mso_overlap_pixel_num; uint32_t pixel_repetition; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t num_segment_padded_pixels; -#endif }; struct opp_funcs { @@ -305,7 +303,6 @@ struct opp_funcs { struct output_pixel_processor *opp, bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*opp_set_disp_pattern_generator)( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, @@ -324,7 +321,6 @@ struct opp_funcs { void (*opp_program_left_edge_extra_pixel)( struct output_pixel_processor *opp, bool count); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index c0b93d51ca8d..351b387ad606 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -65,13 +65,11 @@ struct audio_clock_info { uint32_t cts_48khz; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum dynamic_metadata_mode { dmdata_dp, dmdata_hdmi, dmdata_dolby_vision }; -#endif struct encoder_info_frame { /* auxiliary video information */ @@ -90,9 +88,7 @@ struct encoder_info_frame { struct encoder_unblank_param { struct dc_link_settings link_settings; struct dc_crtc_timing timing; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int opp_cnt; -#endif }; struct encoder_set_dp_phy_pattern_param { @@ -109,7 +105,6 @@ struct stream_encoder { enum engine_id id; }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct enc_state { uint32_t dsc_mode; // DISABLED 0; 1 or 2 indicate enabled state. 
uint32_t dsc_slice_width; @@ -119,7 +114,6 @@ struct enc_state { uint32_t sec_gsp_pps_enable; uint32_t sec_stream_enable; }; -#endif struct stream_encoder_funcs { void (*dp_set_stream_attribute)( @@ -220,8 +214,6 @@ struct stream_encoder_funcs { enum dc_pixel_encoding *encoding, enum dc_color_depth *depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s); void (*dp_set_dsc_config)( @@ -233,7 +225,6 @@ struct stream_encoder_funcs { void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps); -#endif void (*set_dynamic_metadata)(struct stream_encoder *enc, bool enable, @@ -243,7 +234,6 @@ struct stream_encoder_funcs { void (*dp_set_odm_combine)( struct stream_encoder *enc, bool odm_combine); -#endif }; #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 27c73caf74ee..2d3efd71fa51 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -195,10 +195,8 @@ struct timing_generator_funcs { void (*lock)(struct timing_generator *tg); void (*lock_doublebuffer_disable)(struct timing_generator *tg); void (*lock_doublebuffer_enable)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void(*triplebuffer_unlock)(struct timing_generator *tg); void(*triplebuffer_lock)(struct timing_generator *tg); -#endif void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst); void (*enable_crtc_reset)(struct timing_generator *tg, @@ -235,7 +233,6 @@ struct timing_generator_funcs { bool (*is_optc_underflow_occurred)(struct timing_generator *tg); void (*clear_optc_underflow)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_dwb_source)(struct timing_generator *optc, uint32_t dwb_pipe_inst); @@ -243,7 +240,6 @@ struct timing_generator_funcs { uint32_t *num_of_input_segments, uint32_t *seg0_src_sel, uint32_t *seg1_src_sel); -#endif /** * Configure CRCs for the given timing generator. 
Return false if TG is @@ -267,13 +263,10 @@ struct timing_generator_funcs { void (*set_vtg_params)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*set_dsc_config)(struct timing_generator *optc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); -#endif void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing); @@ -281,7 +274,6 @@ struct timing_generator_funcs { void (*set_gsl_source_select)(struct timing_generator *optc, int group_idx, uint32_t gsl_ready_signal); -#endif }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d39c1e11def5..23e3a541b7c9 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -66,19 +66,15 @@ struct dce_hwseq { struct pipe_ctx; struct dc_state; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_stream_status; struct dc_writeback_info; -#endif struct dchub_init_data; struct dc_static_screen_events; struct resource_pool; struct resource_context; struct stream_resource; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config; struct dc_virtual_addr_space_config; -#endif struct hubp; struct dpp; @@ -113,7 +109,6 @@ struct hw_sequencer_funcs { uint16_t *matrix, int opp_id); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*program_front_end_for_ctx)( struct dc *dc, struct dc_state *context); @@ -124,7 +119,6 @@ struct hw_sequencer_funcs { void (*set_flip_control_gsl)( struct pipe_ctx *pipe_ctx, bool flip_immediate); -#endif void (*update_plane_addr)( const struct dc *dc, @@ -138,7 +132,6 @@ struct hw_sequencer_funcs { struct dce_hwseq *hws, struct dchub_init_data *dh_data); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int (*init_sys_ctx)( struct dce_hwseq *hws, struct dc *dc, @@ -148,7 +141,6 @@ struct hw_sequencer_funcs { struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid); -#endif void (*update_mpcc)( struct dc *dc, struct pipe_ctx *pipe_ctx); @@ -239,13 +231,11 @@ struct hw_sequencer_funcs { const struct dc *dc, struct dc_state *context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool (*update_bandwidth)( struct dc *dc, struct dc_state *context); void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx); bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx); -#endif void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, unsigned int vmin, unsigned int vmax, @@ -323,7 +313,6 @@ struct hw_sequencer_funcs { bool power_on); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); void (*program_all_writeback_pipes_in_tree)( struct dc *dc, @@ -339,7 +328,6 @@ struct hw_sequencer_funcs { struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); -#endif enum dc_status (*set_clock)(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index bef224bf803e..7a85abc53d05 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -46,12 +46,8 @@ struct resource_caps { int num_pll; int num_dwb; int num_ddc; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int num_vmid; 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT int num_dsc; -#endif -#endif }; struct resource_straps { diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index ea75420fc876..75db39691616 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -66,11 +66,9 @@ IRQ_DCN1 = irq_service_dcn10.o AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1) -endif ############################################################################### # DCN 20 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0 IRQ_DCN2 = irq_service_dcn20.o AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2)) diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index d1ce75212d9f..b8040da94b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -82,22 +82,14 @@ static void virtual_stream_encoder_reset_hdmi_stream_attribute( struct stream_encoder *enc) {} -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void virtual_enc_dp_set_odm_combine( struct stream_encoder *enc, bool odm_combine) {} -#endif -#endif static const struct stream_encoder_funcs virtual_str_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .dp_set_odm_combine = virtual_enc_dp_set_odm_combine, -#endif -#endif .dp_set_stream_attribute = virtual_stream_encoder_dp_set_stream_attribute, .hdmi_set_stream_attribute = diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 1be6c44fd32f..70dbf64d1644 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -150,7 +150,6 @@ #define FAMILY_RV 142 /* DCN 1*/ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define FAMILY_NV 143 /* DCN 2*/ @@ -164,7 +163,6 @@ enum { #define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0) #define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0)) #define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define RENOIR_A0 0x91 #define DEVICE_ID_RENOIR_1636 0x1636 // Renoir diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index fcc42372b6cf..2db5d4f60ac3 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -46,9 +46,7 @@ enum dce_version { DCE_VERSION_MAX, DCN_VERSION_1_0, DCN_VERSION_1_01, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN_VERSION_2_0, -#endif #if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN_VERSION_2_1, #endif diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 2b219cdb13ad..89a709267019 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -66,12 +66,8 @@ #define DC_LOG_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_ALL_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__) -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define DC_LOG_DSC(...) 
DRM_DEBUG_KMS(__VA_ARGS__) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN3_0) || defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) -#endif struct dal_logger; @@ -116,9 +112,7 @@ enum dc_log_type { LOG_PERF_TRACE, LOG_DISPLAYSTATS, LOG_HDMI_RETIMER_REDRIVER, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT LOG_DSC, -#endif LOG_DWB, LOG_GAMMA_DEBUG, LOG_MAX_HW_POINTS, diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h index b45f7d65e76a..fe2117904329 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h @@ -45,7 +45,6 @@ enum vrr_packet_type { PACKET_TYPE_VTEM }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) union lut3d_control_flags { unsigned int raw; struct { @@ -104,6 +103,5 @@ struct lut3d_settings { enum lut3d_control_gamut_map map2; enum lut3d_control_rotation_mode rotation2; }; -#endif #endif /* MOD_SHARED_H_ */ -- cgit From aca935c7cc866a935a61769c9e9782dd834a8502 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 6 Nov 2019 14:44:19 -0500 Subject: drm/amd/display: Drop CONFIG_DRM_AMD_DC_DCN2_1 flag [Why] DCN21 is stable enough to be built by default. So drop the flags. [How] Remove them using the unifdef tool. The following commands were executed in sequence: $ find -name '*.c' -exec unifdef -m -DCONFIG_DRM_AMD_DC_DCN2_1 -UCONFIG_TRIM_DRM_AMD_DC_DCN2_1 '{}' ';' $ find -name '*.h' -exec unifdef -m -DCONFIG_DRM_AMD_DC_DCN2_1 -UCONFIG_TRIM_DRM_AMD_DC_DCN2_1 '{}' ';' In addition: * Remove from kconfig, and replace any dependencies with DCN1_0. * Remove from any makefiles. * Fix and clean up Renoir definitions in dal_asic_id.h * Expand DCN1 ifdef to include DCN21 code in the following files: * clk_mgr/clk_mgr.c: dc_clk_mgr_create() * core/dc_resources.c: dc_create_resource_pool() * gpio/hw_factory.c: dal_hw_factory_init() * gpio/hw_translate.c: dal_hw_translate_init() Signed-off-by: Bhawanpreet Lakha Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 -- drivers/gpu/drm/amd/display/Kconfig | 18 +----------------- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ------ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4 ---- drivers/gpu/drm/amd/display/dc/Makefile | 3 --- .../drm/amd/display/dc/bios/command_table_helper2.c | 2 -- drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 2 -- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 4 ---- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 -- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 ------ drivers/gpu/drm/amd/display/dc/dc.h | 2 -- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2 -- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 6 ------ drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 2 -- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 4 ---- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 8 -------- .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 -- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 16 ---------------- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 8 -------- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 -- drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 4 ---- drivers/gpu/drm/amd/display/dc/dml/Makefile | 4 ---- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 6 ------ drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 2 -- drivers/gpu/drm/amd/display/dc/gpio/Makefile | 5 +++-- .../drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h |
2 -- .../drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h | 2 -- drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 4 ---- drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 4 ---- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 4 ---- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 6 ------ drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 2 -- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 2 -- drivers/gpu/drm/amd/display/dc/irq/Makefile | 2 -- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 2 -- drivers/gpu/drm/amd/display/include/dal_types.h | 2 -- 36 files changed, 4 insertions(+), 150 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1b865d7f904d..329bd3787e57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2603,8 +2603,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: #endif return amdgpu_dc != 0; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 0b4c71dc0447..b5a9bfe8998c 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -15,23 +15,7 @@ config DRM_AMD_DC config DRM_AMD_DC_DCN1_0 def_bool n help - RV and NV family support for display engine - -config DRM_AMD_DC_DCN2_1 - bool "DCN 2.1 family" - depends on DRM_AMD_DC && X86 - help - Choose this option if you want to have - Renoir support for display engine - -config DRM_AMD_DC_DSC_SUPPORT - bool "DSC support" - default y - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN1_0 - help - Choose this option if you want to have - Dynamic Stream Compression support + Raven, Navi and Renoir family support for display engine config DRM_AMD_DC_HDCP bool "Enable HDCP support in DC" diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6c986d446864..ad212406ca3b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2756,9 +2756,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_NAVI12: case CHIP_NAVI10: case CHIP_NAVI14: -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: -#endif if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -2922,13 +2920,11 @@ static int dm_early_init(void *handle) adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; -#endif default: DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); return -EINVAL; @@ -3224,9 +3220,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_NAVI14 || adev->asic_type == CHIP_NAVI12 || -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) adev->asic_type == CHIP_RENOIR || -#endif adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ tiling_info->gfx9.num_pipes = diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 118488e473c1..229788bee544 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -891,7 +891,6 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, return PP_SMU_RESULT_FAIL; } -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 enum pp_smu_status pp_rn_get_dpm_clock_table( struct pp_smu *pp, struct dpm_clocks *clock_table) { @@ -973,7 +972,6 @@ enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, return PP_SMU_RESULT_OK; } -#endif void dm_pp_get_funcs( struct dc_context *ctx, @@ -1018,14 +1016,12 @@ void dm_pp_get_funcs( funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 case DCN_VERSION_2_1: funcs->ctx.ver = PP_SMU_VER_RN; funcs->rn_funcs.pp_smu.dm = ctx; funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; break; -#endif default: DRM_ERROR("smu version is not supported !\n"); break; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 38ef29719400..382131166aa2 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -29,9 +29,6 @@ ifdef CONFIG_DRM_AMD_DC_DCN1_0 DC_LIBS += dcn20 DC_LIBS += dsc DC_LIBS += dcn10 dml -endif - -ifdef CONFIG_DRM_AMD_DC_DCN2_1 DC_LIBS += dcn21 endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 45bb2bd81ba1..47bb802b7164 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -65,11 +65,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCN_VERSION_2_0: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#endif case DCE_VERSION_12_0: case DCE_VERSION_12_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index 9f15817a3eed..de01543f0161 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -81,9 +81,7 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 ############################################################################### # DCN21 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 76b4831a826e..740d92bd4481 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -37,9 +37,7 @@ #include "dcn10/rv1_clk_mgr.h" #include "dcn10/rv2_clk_mgr.h" #include "dcn20/dcn20_clk_mgr.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/rn_clk_mgr.h" -#endif int clk_mgr_helper_get_active_display_cnt( @@ -136,12 +134,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case FAMILY_RV: -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; } -#endif /* DCN2_1 */ if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) { rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); break; diff 
--git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e7e552f02b51..30e42cbc17d1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -705,10 +705,8 @@ static bool construct(struct dc *dc, if (!dc->clk_mgr) goto fail; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 if (dc->res_pool->funcs->update_bw_bounding_box) dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); -#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 6e20c76b9933..162e512831b7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -50,9 +50,7 @@ #include "dcn10/dcn10_resource.h" #endif #include "dcn20/dcn20_resource.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/dcn21_resource.h" -#endif #include "dce120/dce120_resource.h" #define DC_LOGGER_INIT(logger) @@ -102,10 +100,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) dc_version = DCN_VERSION_1_0; if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_1_01; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_2_1; -#endif break; #endif @@ -168,11 +164,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, case DCN_VERSION_2_0: res_pool = dcn20_create_resource_pool(init_data, dc); break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: res_pool = dcn21_create_resource_pool(init_data, dc); break; -#endif #endif default: diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 102a55d8d026..a6c40c07f07d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -401,9 +401,7 @@ struct dc_debug_options { bool dmub_command_table; /* for testing only */ struct dc_bw_validation_profile bw_val_profile; bool disable_fec; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 bool disable_48mhz_pwrdwn; -#endif /* This forces a hard min on the DCFCLK requested to SMU/PP * watermarks are not affected. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 5b4a29ee1696..8d0d07db5190 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -76,7 +76,6 @@ SRII(PIXEL_RATE_CNTL, OTG, 4),\ SRII(PIXEL_RATE_CNTL, OTG, 5) -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRII(PHASE, DP_DTO, 0),\ @@ -91,7 +90,6 @@ SRII(PIXEL_RATE_CNTL, OTG, 1),\ SRII(PIXEL_RATE_CNTL, OTG, 2),\ SRII(PIXEL_RATE_CNTL, OTG, 3) -#endif #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 4144b1055db2..d01fb2f55535 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -440,7 +440,6 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) return status; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) static bool dcn21_dmcu_init(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); @@ -452,7 +451,6 @@ static bool dcn21_dmcu_init(struct dmcu *dmcu) return dcn10_dmcu_init(dmcu); } -#endif static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, unsigned int start_offset, @@ -834,7 +832,6 @@ static const struct dmcu_funcs dcn20_funcs = { .unlock_phy = dcn20_unlock_phy }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) static const struct dmcu_funcs dcn21_funcs = { .dmcu_init = dcn21_dmcu_init, .load_iram = dcn10_dmcu_load_iram, @@ -848,7 +845,6 @@ static const struct dmcu_funcs dcn21_funcs = { .unlock_phy = dcn20_unlock_phy }; #endif -#endif static void dce_dmcu_construct( struct dce_dmcu *dmcu_dce, @@ -952,7 +948,6 @@ struct dmcu *dcn20_dmcu_create( return &dmcu_dce->base; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct dmcu *dcn21_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -974,7 +969,6 @@ struct dmcu *dcn21_dmcu_create( return &dmcu_dce->base; } #endif -#endif void dce_dmcu_destroy(struct dmcu **dmcu) { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h index 89277899b507..5e044c2d3d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h @@ -272,13 +272,11 @@ struct dmcu *dcn20_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct dmcu *dcn21_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#endif void dce_dmcu_destroy(struct dmcu **dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index f31eea1bdec0..ebe8f9a21be2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -276,7 +276,6 @@ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -327,7 +326,6 @@ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#endif struct dce_hwseq_registers { @@ -635,7 +633,6 @@ struct dce_hwseq_registers { HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#if 
defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -678,7 +675,6 @@ struct dce_hwseq_registers { HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) -#endif #define HWSEQ_REG_FIELD_LIST(type) \ type DCFE_CLOCK_ENABLE; \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 69d903d68661..af57751253de 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -121,7 +121,6 @@ struct dcn_hubbub_registers { uint32_t DCN_VM_AGP_BASE; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; @@ -140,7 +139,6 @@ struct dcn_hubbub_registers { uint32_t DCHVM_CLK_CTRL; uint32_t DCHVM_RIOMMU_CTRL0; uint32_t DCHVM_RIOMMU_STAT0; -#endif }; /* set field name */ @@ -232,7 +230,6 @@ struct dcn_hubbub_registers { type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HUBBUB_HVM_REG_FIELD_LIST(type) \ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\ @@ -278,22 +275,17 @@ struct dcn_hubbub_registers { type HOSTVM_POWERSTATUS; \ type RIOMMU_ACTIVE; \ type HOSTVM_PREFETCH_DONE -#endif struct dcn_hubbub_shift { DCN_HUBBUB_REG_FIELD_LIST(uint8_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) HUBBUB_HVM_REG_FIELD_LIST(uint8_t); -#endif }; struct dcn_hubbub_mask { DCN_HUBBUB_REG_FIELD_LIST(uint32_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) HUBBUB_HVM_REG_FIELD_LIST(uint32_t); -#endif }; struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index fc07538f00ec..adba767ccf2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -677,10 +677,8 @@ static void dcn10_bios_golden_init(struct dc *dc) int i; bool allow_self_fresh_force_enable = true; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) return; -#endif if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) allow_self_fresh_force_enable = dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index d5c8615af45e..8c04a3606a54 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -148,7 +148,6 @@ uint32_t VMID_SETTINGS_0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ uint32_t FLIP_PARAMETERS_3;\ @@ -157,7 +156,6 @@ uint32_t FLIP_PARAMETERS_6;\ uint32_t VBLANK_PARAMETERS_5;\ uint32_t VBLANK_PARAMETERS_6 -#endif #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN_HUBP_REG_FIELD_BASE_LIST(type); \ @@ -184,7 +182,6 @@ type SURFACE_TRIPLE_BUFFER_ENABLE;\ type VMID 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1 #define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\ type REFCYC_PER_VM_GROUP_FLIP;\ @@ -194,31 +191,18 @@ type REFCYC_PER_PTE_GROUP_FLIP_C; \ type REFCYC_PER_META_CHUNK_FLIP_C; \ type VM_GROUP_SIZE -#endif struct dcn_hubp2_registers { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_COMMON_VARIABLE_LIST; -#else - DCN2_HUBP_REG_COMMON_VARIABLE_LIST; -#endif }; struct dcn_hubp2_shift { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#else - DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#endif }; struct dcn_hubp2_mask { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#else - DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#endif }; struct dcn20_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d246d94e9b51..3d5a79ff1151 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2599,11 +2599,9 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; @@ -2615,10 +2613,8 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; @@ -2630,10 +2626,8 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = 
get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; @@ -2643,10 +2637,8 @@ static void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -#endif } void dcn20_calculate_dlg_params( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 0f6e2a08b663..39321b2a5504 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -976,11 +976,9 @@ static void calculate_wm_set_for_vlevel( wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; -#endif dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; } diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index dc9dbbc51d1c..ae608c329366 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -42,9 +42,7 @@ enum pp_smu_ver { PP_SMU_UNSUPPORTED, PP_SMU_VER_RV, PP_SMU_VER_NV, -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) PP_SMU_VER_RN, -#endif PP_SMU_VER_MAX }; @@ -288,9 +286,7 @@ struct pp_smu_funcs { union { struct pp_smu_funcs_rv rv_funcs; struct pp_smu_funcs_nv nv_funcs; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct pp_smu_funcs_rn rn_funcs; -#endif }; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index be9815b5f1e7..6ec923a2a183 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -44,8 +44,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) endif @@ -59,8 +57,6 @@ DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ ifdef CONFIG_DRM_AMD_DC_DCN1_0 DML += display_mode_vba.o 
dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 9c6016e57d2b..2689401a03a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -29,10 +29,8 @@ #include "dcn20/display_rq_dlg_calc_20.h" #include "dcn20/display_mode_vba_20v2.h" #include "dcn20/display_rq_dlg_calc_20v2.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 #include "dcn21/display_mode_vba_21.h" #include "dcn21/display_rq_dlg_calc_21.h" -#endif const struct dml_funcs dml20_funcs = { .validate = dml20_ModeSupportAndSystemConfigurationFull, @@ -48,14 +46,12 @@ const struct dml_funcs dml20v2_funcs = { .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 const struct dml_funcs dml21_funcs = { .validate = dml21_ModeSupportAndSystemConfigurationFull, .recalculate = dml21_recalculate, .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg, .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg }; -#endif void dml_init_instance(struct display_mode_lib *lib, const struct _vcs_dpi_soc_bounding_box_st *soc_bb, @@ -72,11 +68,9 @@ void dml_init_instance(struct display_mode_lib *lib, case DML_PROJECT_NAVI10v2: lib->funcs = dml20v2_funcs; break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 case DML_PROJECT_DCN21: lib->funcs = dml21_funcs; break; -#endif default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 212188be1ec1..cf2758ca5b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -34,9 +34,7 @@ enum dml_project { DML_PROJECT_RAVEN1, DML_PROJECT_NAVI10, DML_PROJECT_NAVI10v2, -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 DML_PROJECT_DCN21, -#endif }; struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index 7791cd29fc18..013cfac4ff55 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -76,9 +76,10 @@ GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 +############################################################################### +# DCN 21 +############################################################################### GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h index 2443f9e7afbf..4949e0c7fa06 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #ifndef __DAL_HW_FACTORY_DCN21_H__ #define __DAL_HW_FACTORY_DCN21_H__ @@ -30,4 +29,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory); #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h 
b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h index 2bfaac24c574..9462b0a65200 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #ifndef __DAL_HW_TRANSLATE_DCN21_H__ #define __DAL_HW_TRANSLATE_DCN21_H__ @@ -32,4 +31,3 @@ struct hw_translate; void dal_hw_translate_dcn21_init(struct hw_translate *tr); #endif /* __DAL_HW_TRANSLATE_DCN21_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index edd70292cf86..fb2d66729ca3 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -49,9 +49,7 @@ #include "dcn10/hw_factory_dcn10.h" #endif #include "dcn20/hw_factory_dcn20.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_factory_dcn21.h" -#endif #include "diagnostics/hw_factory_diag.h" @@ -97,11 +95,9 @@ bool dal_hw_factory_init( case DCN_VERSION_2_0: dal_hw_factory_dcn20_init(factory); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_factory_dcn21_init(factory); return true; -#endif #endif default: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 8e10bff4c074..55acfda9ea63 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -47,9 +47,7 @@ #include "dcn10/hw_translate_dcn10.h" #endif #include "dcn20/hw_translate_dcn20.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_translate_dcn21.h" -#endif #include "diagnostics/hw_translate_diag.h" @@ -92,11 +90,9 @@ bool dal_hw_translate_init( case DCN_VERSION_2_0: dal_hw_translate_dcn20_init(translate); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_translate_dcn21_init(translate); return true; -#endif #endif default: diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 4e00c26c4eea..c98d887cc6e2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -87,9 +87,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); struct resource_pool; struct dc_state; struct resource_context; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct clk_bw_params; -#endif struct resource_funcs { void (*destroy)(struct resource_pool **pool); @@ -143,11 +141,9 @@ struct resource_funcs { struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) void (*update_bw_bounding_box)( struct dc *dc, struct clk_bw_params *bw_params); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 4e18e77dcf42..f55203e427de 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -31,7 +31,6 @@ #define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Constants */ #define DDR4_DRAM_WIDTH 64 #define WM_A 0 @@ -39,12 +38,10 @@ #define WM_C 2 #define WM_D 3 #define WM_SET_COUNT 4 -#endif #define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Will these bw structures be ASIC specific? 
*/ #define MAX_NUM_DPM_LVL 8 @@ -152,7 +149,6 @@ struct clk_bw_params { struct clk_limit_table clk_table; struct wm_table wm_table; }; -#endif /* Public interfaces */ struct clk_states { @@ -193,9 +189,7 @@ struct clk_mgr { bool psr_allow_active_cache; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes int dentist_vco_freq_khz; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 struct clk_bw_params *bw_params; -#endif }; /* forward declarations */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 67b610d6d91f..2e2310f1901a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -40,11 +40,9 @@ struct cstate_pstate_watermarks_st { struct dcn_watermarks { uint32_t pte_meta_urgent_ns; uint32_t urgent_ns; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t frac_urg_bw_nom; uint32_t frac_urg_bw_flip; int32_t urgent_latency_ns; -#endif struct cstate_pstate_watermarks_st cstate_pstate; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 23e3a541b7c9..663fa1809a73 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -337,9 +337,7 @@ struct hw_sequencer_funcs { enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) bool (*s0i3_golden_init_wa)(struct dc *dc); -#endif }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index 75db39691616..c26300c3936d 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -74,11 +74,9 @@ IRQ_DCN2 = irq_service_dcn20.o AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2) -endif ############################################################################### # DCN 21 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_1 IRQ_DCN21 = irq_service_dcn21.o AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21)) diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 70dbf64d1644..6f56208a9471 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -163,11 +163,9 @@ enum { #define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0) #define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0)) #define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define RENOIR_A0 0x91 #define DEVICE_ID_RENOIR_1636 0x1636 // Renoir #define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF)) -#endif /* * ASIC chip ID diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index 2db5d4f60ac3..0b6859189ca7 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -47,9 +47,7 @@ enum dce_version { DCN_VERSION_1_0, DCN_VERSION_1_01, DCN_VERSION_2_0, -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN_VERSION_2_1, -#endif DCN_VERSION_MAX }; -- cgit From b86a1aa36a92bcfbc062c5e99c1d084f27f25bab 
Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 6 Nov 2019 14:48:35 -0500 Subject: drm/amd/display: rename DCN1_0 kconfig to DCN Since dcn20 and dcn21 are under dcn1 it doesn't make sense to have it named dcn1. Change it to "dcn" to make it generic. Signed-off-by: Bhawanpreet Lakha Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/display/Kconfig | 4 ++-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++---- drivers/gpu/drm/amd/display/dc/Makefile | 4 ++-- .../drm/amd/display/dc/bios/command_table_helper2.c | 2 +- drivers/gpu/drm/amd/display/dc/calcs/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 8 ++++---- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 12 ++++++------ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4 ++-- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 2 +- .../gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2 +- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 10 +++++----- .../gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 20 ++++++++++---------- .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h | 2 +- drivers/gpu/drm/amd/display/dc/dml/Makefile | 4 ++-- drivers/gpu/drm/amd/display/dc/gpio/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 4 ++-- drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 4 ++-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6 +++--- drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 2 +- drivers/gpu/drm/amd/display/dc/irq/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2 +- drivers/gpu/drm/amd/display/dc/os_types.h | 2 +- 30 files changed, 62 insertions(+), 62 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 329bd3787e57..9d210bb9bf33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2598,7 +2598,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case CHIP_RAVEN: case CHIP_NAVI10: case CHIP_NAVI14: diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index b5a9bfe8998c..78f40690a109 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -6,13 +6,13 @@ config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y select SND_HDA_COMPONENT if SND_HDA_CORE - select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) + select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help Choose this option if you want to use the new display engine support for AMDGPU. This adds required support for Vega and Raven ASICs.
-config DRM_AMD_DC_DCN1_0 +config DRM_AMD_DC_DCN def_bool n help Raven, Navi and Renoir family support for display engine diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ad212406ca3b..d01186029e87 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -76,7 +76,7 @@ #include #include -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #include "dcn/dcn_1_0_offset.h" @@ -2190,7 +2190,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) return 0; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* Register IRQ sources and initialize IRQ callbacks */ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) { @@ -2751,7 +2751,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case CHIP_RAVEN: case CHIP_NAVI12: case CHIP_NAVI10: @@ -2902,7 +2902,7 @@ static int dm_early_init(void *handle) adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case CHIP_RAVEN: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 382131166aa2..6e3dddc73246 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -25,7 +25,7 @@ DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN DC_LIBS += dcn20 DC_LIBS += dsc DC_LIBS += dcn10 dml @@ -50,7 +50,7 @@ include $(AMD_DC) DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN DISPLAY_CORE += dc_vm_helper.o endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 47bb802b7164..7388c987c595 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCE_VERSION_11_22: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: *h = dal_cmd_tbl_helper_dce112_get_table2(); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 985633c08a26..e54f2031b617 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -42,7 +42,7 @@ CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautologi BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index de01543f0161..3cd283195091 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -63,7 +63,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o 
AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120) -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN ############################################################################### # DCN10 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 740d92bd4481..a7c4c1d1fc59 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -132,7 +132,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dce120_clk_mgr_construct(ctx, clk_mgr); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 30e42cbc17d1..1fdba13b3d0f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -566,7 +566,7 @@ static void destruct(struct dc *dc) kfree(dc->bw_dceip); dc->bw_dceip = NULL; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN kfree(dc->dcn_soc); dc->dcn_soc = NULL; @@ -585,7 +585,7 @@ static bool construct(struct dc *dc, struct dc_context *dc_ctx; struct bw_calcs_dceip *dc_dceip; struct bw_calcs_vbios *dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_soc_bounding_box *dcn_soc; struct dcn_ip_params *dcn_ip; #endif @@ -617,7 +617,7 @@ static bool construct(struct dc *dc, } dc->bw_vbios = dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); if (!dcn_soc) { dm_error("%s: failed to create dcn_soc\n", __func__); @@ -1296,7 +1296,7 @@ struct dc_state *dc_create_state(struct dc *dc) * initialize and obtain IP and SOC the base DML instance from DC is * initially copied into every context */ -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index b9227d5de3a3..85a52a16295a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -347,7 +347,7 @@ void context_clock_trace( struct dc *dc, struct dc_state *context) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOGGER_INIT(dc->ctx->logger); CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 0a0badb2e206..bdc8be373ff0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2580,7 +2580,7 @@ bool dc_link_setup_psr(struct dc_link *link, psr_context->psr_level.u32all = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /*skip power down the single pipe since it blocks the cstate*/ if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev)) psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 162e512831b7..89b5f86cd40b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -46,7 +46,7 @@ #include "dce100/dce100_resource.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/dcn10_resource.h" #endif #include "dcn20/dcn20_resource.h" @@ -95,7 +95,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) else dc_version = DCE_VERSION_12_0; break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: dc_version = DCN_VERSION_1_0; if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) @@ -154,7 +154,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, init_data->num_virtual_links, dc); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: res_pool = dcn10_create_resource_pool(init_data, dc); @@ -1192,7 +1192,7 @@ static struct pipe_ctx *acquire_free_pipe_for_head( return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static int acquire_first_split_pipe( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1273,7 +1273,7 @@ bool dc_add_plane_to_context( free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); - #if defined(CONFIG_DRM_AMD_DC_DCN1_0) + #if defined(CONFIG_DRM_AMD_DC_DCN) if (!free_pipe) { int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); if (pipe_idx >= 0) @@ -1947,7 +1947,7 @@ enum dc_status resource_map_pool_resources( /* acquire new resources */ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN if (pipe_idx < 0) pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index dc05c14530b0..371d49e9b745 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -32,7 +32,7 @@ #include "resource.h" #include "ipp.h" #include "timing_generator.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/dcn10_hw_sequencer.h" #endif @@ -235,7 +235,7 @@ struct dc_stream_status *dc_stream_get_status( static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) unsigned int vupdate_line; unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos; struct dc_stream_state *stream = pipe_ctx->stream; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a6c40c07f07d..2e6b3ecd564d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -492,7 +492,7 @@ struct dc { /* Inputs into BW and WM calculations. 
*/ struct bw_calcs_dceip *bw_dceip; struct bw_calcs_vbios *bw_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_soc_bounding_box *dcn_soc; struct dcn_ip_params *dcn_ip; struct display_mode_lib dml; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 898decadb8e6..2e992fbc0d71 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -905,7 +905,7 @@ static bool dce112_program_pix_clk( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; unsigned dp_dto_ref_100hz = 7000000; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 8d0d07db5190..51bd25079606 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -97,7 +97,7 @@ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index d01fb2f55535..e619e67e6b51 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -324,7 +324,7 @@ static void dce_get_psr_wait_loop( return; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static void dcn10_get_dmcu_version(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); @@ -794,7 +794,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu) return true; } -#endif //(CONFIG_DRM_AMD_DC_DCN1_0) +#endif //(CONFIG_DRM_AMD_DC_DCN) static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, @@ -807,7 +807,7 @@ static const struct dmcu_funcs dce_funcs = { .is_dmcu_initialized = dce_is_dmcu_initialized }; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static const struct dmcu_funcs dcn10_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, @@ -864,7 +864,7 @@ static void dce_dmcu_construct( dmcu_dce->dmcu_mask = dmcu_mask; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static void dcn21_dmcu_construct( struct dce_dmcu *dmcu_dce, struct dc_context *ctx, @@ -905,7 +905,7 @@ struct dmcu *dce_dmcu_create( return &dmcu_dce->base; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) struct dmcu *dcn10_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 2baaac1e5156..451574971b96 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -137,7 +137,7 @@ static void dce110_update_generic_info_packet( AFMT_GENERIC0_UPDATE, (packet_index == 0), AFMT_GENERIC2_UPDATE, (packet_index == 2)); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if 
defined(CONFIG_DRM_AMD_DC_DCN) if (REG(AFMT_VBI_PACKET_CONTROL1)) { switch (packet_index) { case 0: @@ -231,7 +231,7 @@ static void dce110_update_hdmi_info_packet( HDMI_GENERIC1_SEND, send, HDMI_GENERIC1_LINE, line); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case 4: if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, @@ -278,7 +278,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t h_active_start; uint32_t v_active_start; uint32_t misc0 = 0; @@ -330,7 +330,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN) REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (enc110->se_mask->DP_VID_N_MUL) REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1); #endif @@ -341,7 +341,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(DP_MSA_MISC)) misc1 = REG_READ(DP_MSA_MISC); #endif @@ -375,7 +375,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( /* set dynamic range and YCbCr range */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: colorimetry_bpc = 0; @@ -455,7 +455,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( DP_DYN_RANGE, dynamic_range_rgb, DP_YCBCR_RANGE, dynamic_range_ycbcr); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(DP_MSA_COLORIMETRY)) REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0); @@ -490,7 +490,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( hw_crtc_timing.v_front_porch; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* start at begining of left border */ if (REG(DP_MSA_TIMING_PARAM2)) REG_SET_2(DP_MSA_TIMING_PARAM2, 0, @@ -787,7 +787,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets( dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (enc110->se_mask->HDMI_DB_DISABLE) { /* for bring up, disable dp double TODO */ if (REG(HDMI_DB_CONTROL)) @@ -825,7 +825,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets( HDMI_GENERIC1_LINE, 0, HDMI_GENERIC1_SEND, 0); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* stop generic packets 2 & 3 on HDMI */ if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 811896a43b67..3f5fbad587e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1223,7 +1223,7 @@ static void program_scaler(const struct dc *dc, { struct tg_color color = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* TOFPGA */ if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL) return; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c index 64b31edc8cf6..b6391a5ead78 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c @@ -23,7 +23,7 @@ * */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "reg_helper.h" #include "resource.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h index c175edd0bae7..d56ea7c8171e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h @@ -24,7 +24,7 @@ #ifndef __DC_DWBC_DCN10_H__ #define __DC_DWBC_DCN10_H__ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN */ #define BASE_INNER(seg) \ diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 6ec923a2a183..32c1eedfa5e3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -38,7 +38,7 @@ endif CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) @@ -54,7 +54,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ dml_common_defs.o -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index 013cfac4ff55..202baa210cc8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -61,7 +61,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120) ############################################################################### # DCN 1x ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index fb2d66729ca3..d2d36d48caaa 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -45,7 +45,7 @@ #include "dce80/hw_factory_dce80.h" #include "dce110/hw_factory_dce110.h" #include "dce120/hw_factory_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_factory_dcn10.h" #endif #include "dcn20/hw_factory_dcn20.h" @@ -86,7 +86,7 @@ bool dal_hw_factory_init( case DCE_VERSION_12_1: dal_hw_factory_dce120_init(factory); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: dal_hw_factory_dcn10_init(factory); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 55acfda9ea63..5d396657a1ee 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -43,7 +43,7 @@ #include 
"dce80/hw_translate_dce80.h" #include "dce110/hw_translate_dce110.h" #include "dce120/hw_translate_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_translate_dcn10.h" #endif #include "dcn20/hw_translate_dcn20.h" @@ -81,7 +81,7 @@ bool dal_hw_translate_init( case DCE_VERSION_12_1: dal_hw_translate_dce120_init(translate); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: dal_hw_translate_dcn10_init(translate); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c98d887cc6e2..e0aac234537f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -33,7 +33,7 @@ #include "dc_bios_types.h" #include "mem_input.h" #include "hubp.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "mpc.h" #endif #include "dwb.h" @@ -290,7 +290,7 @@ struct pipe_ctx { struct pipe_ctx *next_odm_pipe; struct pipe_ctx *prev_odm_pipe; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct _vcs_dpi_display_dlg_regs_st dlg_regs; struct _vcs_dpi_display_ttu_regs_st ttu_regs; struct _vcs_dpi_display_rq_regs_st rq_regs; @@ -368,7 +368,7 @@ struct dc_state { /* Note: these are big structures, do *not* put on stack! */ struct dm_pp_display_configuration pp_display_cfg; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_bw_internal_vars dcn_bw_vars; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index aed67754e81b..735f41901b88 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -54,7 +54,7 @@ enum dwb_source { /* DCN1.x, DCN2.x support 2 pipes */ enum dwb_pipe { dwb_pipe0 = 0, -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) dwb_pipe1, #endif dwb_pipe_max_num, diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index c26300c3936d..0f682ac53bb2 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -60,7 +60,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12) ############################################################################### # DCN 1x ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN IRQ_DCN1 = irq_service_dcn10.o AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1)) diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 0878550a8178..33053b9fe6bd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -38,7 +38,7 @@ #include "dce120/irq_service_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/irq_service_dcn10.h" #endif diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 9a4b5bab03c6..13b9a9bb32c8 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -49,7 +49,7 @@ #define dm_error(fmt, ...) 
DRM_ERROR(fmt, ##__VA_ARGS__) -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include #endif -- cgit From defeb878c46e950991d93c678203d2dc980957c0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 6 Nov 2019 20:51:14 -0500 Subject: drm/amdgpu/display: fix the build when CONFIG_DRM_AMD_DC_DCN is not set Need to protect some DSC functions. Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d01186029e87..37c56156a116 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3926,7 +3926,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; int mode_refresh; int preferred_refresh = 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_dec_dpcd_caps dsc_caps; +#endif uint32_t link_bandwidth_kbps; struct dc_sink *sink = NULL; @@ -4005,12 +4007,15 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream->timing.flags.DSC = 0; if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { +#if defined(CONFIG_DRM_AMD_DC_DCN) dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, &dsc_caps); +#endif link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); +#if defined(CONFIG_DRM_AMD_DC_DCN) if (dsc_caps.is_dsc_supported) if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], &dsc_caps, @@ -4019,6 +4024,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, &stream->timing, &stream->timing.dsc_cfg)) stream->timing.flags.DSC = 1; +#endif } update_stream_scaling_settings(&mode, dm_state, stream); -- cgit From b82197450c0c817deb8e9d7358495d97a373d457 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 8 Nov 2019 11:22:57 -0500 Subject: drm/amdgpu/display: fix warning when CONFIG_DRM_AMD_DC_DCN is not set dm_dcn_crtc_high_irq() is only used when CONFIG_DRM_AMD_DC_DCN is set. 
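For readers less familiar with this class of build warning, the sketch below is a standalone illustration with invented names (it is not code from amdgpu_dm.c): a static function whose only caller is compiled out under a config option must have its definition placed under the same guard, otherwise gcc's -Wunused-function fires when the option is off. That is exactly what the hunk below does for dm_dcn_crtc_high_irq().

	#include <stdio.h>

	/* #define CONFIG_FOO 1 */	/* toggle to mimic the Kconfig option */

	#if defined(CONFIG_FOO)
	/* Only referenced from code that is itself compiled under CONFIG_FOO,
	 * so the definition must live under the same guard or gcc reports
	 * "defined but not used" (-Wunused-function) when CONFIG_FOO is off.
	 */
	static void foo_vstartup_irq(void *params)
	{
		printf("handling vstartup, params=%p\n", params);
	}
	#endif

	static void register_irq_handlers(void)
	{
	#if defined(CONFIG_FOO)
		foo_vstartup_irq(NULL);	/* the only caller */
	#endif
	}

	int main(void)
	{
		register_irq_handlers();
		return 0;
	}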
Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 37c56156a116..c2700bc28c02 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -485,7 +485,7 @@ static void dm_crtc_high_irq(void *interrupt_params) } } - +#if defined(CONFIG_DRM_AMD_DC_DCN) /** * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs * @interrupt params - interrupt parameters @@ -547,6 +547,7 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params) spin_unlock_irqrestore(&adev->ddev->event_lock, flags); } +#endif static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) -- cgit From bae028e3e521e8cb8caf2cc16a455ce4c55f2332 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 4 Nov 2019 21:27:20 +0800 Subject: drm/amdgpu: remove 4 set but not used variable in amdgpu_atombios_get_connector_info_from_object_table Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c: In function 'amdgpu_atombios_get_connector_info_from_object_table': drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c:376:26: warning: variable 'grph_obj_num' set but not used [-Wunused-but-set-variable] drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c:376:13: warning: variable 'grph_obj_id' set but not used [-Wunused-but-set-variable] drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c:341:37: warning: variable 'con_obj_type' set but not used [-Wunused-but-set-variable] drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c:341:24: warning: variable 'con_obj_num' set but not used [-Wunused-but-set-variable] They are never used, so can be removed. 
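As a hedged illustration of what -Wunused-but-set-variable flags (the names below are made up, not taken from amdgpu_atombios.c): a local variable that is assigned but never read afterwards is dead code, and deleting the variable together with its assignment, as this patch does, silences the warning without changing behaviour.

	#include <stdint.h>

	uint8_t decode_object_id(uint16_t object_word)
	{
		uint8_t obj_id;
		uint8_t obj_num;

		obj_id  = object_word & 0x00ff;		/* read below, so no warning      */
		obj_num = (object_word & 0x0f00) >> 8;	/* assigned but never read again:
							 * gcc reports "set but not used";
							 * the fix is to delete this line
							 * and the declaration above       */
		return obj_id;
	}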
Fixes: d38ceaf99ed0 ("drm/amdgpu: add core driver (v4)") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 72232fccf61a..be6d0cfe41ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * path_size += le16_to_cpu(path->usSize); if (device_support & le16_to_cpu(path->usDeviceTag)) { - uint8_t con_obj_id, con_obj_num, con_obj_type; - - con_obj_id = + uint8_t con_obj_id = (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - con_obj_num = - (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) - >> ENUM_ID_SHIFT; - con_obj_type = - (le16_to_cpu(path->usConnObjectId) & - OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; /* Skip TV/CV support */ if ((le16_to_cpu(path->usDeviceTag) == @@ -373,14 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { - uint8_t grph_obj_id, grph_obj_num, grph_obj_type; - - grph_obj_id = - (le16_to_cpu(path->usGraphicObjIds[j]) & - OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - grph_obj_num = - (le16_to_cpu(path->usGraphicObjIds[j]) & - ENUM_ID_MASK) >> ENUM_ID_SHIFT; + uint8_t grph_obj_type= grph_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; -- cgit From b8b721305770cf85bffbe7ce1e0dc5fb6c4fef47 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 4 Nov 2019 21:27:21 +0800 Subject: drm/amdgpu: add function parameter description in 'amdgpu_device_set_cg_state' Fixes gcc warning: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:1954: warning: Function parameter or member 'state' not described in 'amdgpu_device_set_cg_state' Fixes: e3ecdffac9cc ("drm/amdgpu: add documentation for amdgpu_device.c") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9d210bb9bf33..0ad61febbb5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1936,6 +1936,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) * amdgpu_device_set_cg_state - set clockgating for amdgpu device * * @adev: amdgpu_device pointer + * @state: clockgating state (gate or ungate) * * The list of all the hardware IPs that make up the asic is walked and the * set_clockgating_state callbacks are run. 
-- cgit From e8b74035d8032dd8cc8fe8ff6eaecb20827227c2 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 4 Nov 2019 21:27:22 +0800 Subject: drm/amdgpu: add function parameter description in 'amdgpu_gart_bind' Fixes gcc warning: drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c:313: warning: Function parameter or member 'flags' not described in 'amdgpu_gart_bind' Fixes: d38ceaf99ed0 ("drm/amdgpu: add core driver (v4)") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 19705e399905..e01e681d2a60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -302,6 +302,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, * @pages: number of pages to bind * @pagelist: pages to bind * @dma_addr: DMA addresses of pages + * @flags: page table entry flags * * Binds the requested pages to the gart page table * (all asics). -- cgit From 5bea7fedb7fe4d5e6d3ba9f385dd3619fb004ce7 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 4 Nov 2019 21:27:23 +0800 Subject: drm/amdgpu: remove set but not used variable 'dig_connector' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/atombios_dp.c: In function ‘amdgpu_atombios_dp_get_panel_mode’: drivers/gpu/drm/amd/amdgpu/atombios_dp.c:364:36: warning: variable ‘dig_connector’ set but not used [-Wunused-but-set-variable] It is never used, so can be removed. Fixes: d38ceaf99ed0 ("drm/amdgpu: add core driver (v4)") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 6858cde9fc5d..94265306ab11 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -361,7 +361,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - struct amdgpu_connector_atom_dig *dig_connector; int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector); u8 tmp; @@ -369,8 +368,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, if (!amdgpu_connector->con_priv) return panel_mode; - dig_connector = amdgpu_connector->con_priv; - if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, -- cgit From d1d09dc417826f5a983e0f4f212f227beeb65e29 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 4 Nov 2019 21:27:24 +0800 Subject: drm/amdgpu: remove set but not used variable 'dig' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/atombios_dp.c: In function ‘amdgpu_atombios_dp_link_train’: drivers/gpu/drm/amd/amdgpu/atombios_dp.c:716:34: warning: variable ‘dig’ set but not used [-Wunused-but-set-variable] Fixes: d38ceaf99ed0 ("drm/amdgpu: add core driver (v4)") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 2 -- 1 file changed, 2 
deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 94265306ab11..ea702a64f807 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -710,7 +710,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig; struct amdgpu_connector *amdgpu_connector; struct amdgpu_connector_atom_dig *dig_connector; struct amdgpu_atombios_dp_link_train_info dp_info; @@ -718,7 +717,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, if (!amdgpu_encoder->enc_priv) return; - dig = amdgpu_encoder->enc_priv; amdgpu_connector = to_amdgpu_connector(connector); if (!amdgpu_connector->con_priv) -- cgit From 220ac8d1444054ade07ce14498fcda266410f90e Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 4 Nov 2019 21:27:25 +0800 Subject: drm/amdgpu: remove always false comparison in 'amdgpu_atombios_i2c_process_i2c_ch' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wtype-limits' warning: drivers/gpu/drm/amd/amdgpu/atombios_i2c.c: In function ‘amdgpu_atombios_i2c_process_i2c_ch’: drivers/gpu/drm/amd/amdgpu/atombios_i2c.c:79:11: warning: comparison is always false due to limited range of data type [-Wtype-limits] 'num' is 'u8', so it will never be greater than 'ATOM_MAX_HW_I2C_READ', which is defined as 255. Therefore, the comparison can be removed. Fixes: d38ceaf99ed0 ("drm/amdgpu: add core driver (v4)") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c index 980c363b1a0a..b4cc7c55fa16 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c @@ -76,11 +76,6 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan, } args.lpI2CDataOut = cpu_to_le16(out); } else { - if (num > ATOM_MAX_HW_I2C_READ) { - DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); - r = -EINVAL; - goto done; - } args.ucRegIndex = 0; args.lpI2CDataOut = 0; } -- cgit From e98042db2cb8d0b728cd76e05b9c2e1c84b7f72b Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 4 Nov 2019 21:27:26 +0800 Subject: drm/amdgpu: remove set but not used variable 'mc_shared_chmap' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c: In function ‘gfx_v8_0_gpu_early_init’: drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c:1713:6: warning: variable ‘mc_shared_chmap’ set but not used [-Wunused-but-set-variable] Fixes: 0bde3a95eaa9 ("drm/amdgpu: split gfx8 gpu init into sw and hw parts") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ffbde9136372..14e774d52727 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1710,7 +1710,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device
*adev) { u32 gb_addr_config; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; u32 tmp; int ret; @@ -1850,7 +1850,6 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) break; } - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; -- cgit From d785476c608c621b345dd9396e8b21e90375cb0e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 8 Nov 2019 14:45:27 +0000 Subject: drm/amd/display: remove duplicated assignment to grph_obj_type Variable grph_obj_type is being assigned twice, one of these is redundant so remove it. Addresses-Coverity: ("Evaluation order violation") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index be6d0cfe41ae..9ba80d828876 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -365,8 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { - uint8_t grph_obj_type= - grph_obj_type = + uint8_t grph_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; -- cgit From b74361974bfa242b5010ec6b83c7e275c190a247 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 8 Nov 2019 16:29:45 +0000 Subject: drm/amd/display: remove redundant variable status Variable status is redundant, it is being initialized with a value that is over-written later and this is being returned immediately after the assignment. Clean up the code by removing status and just returning the value returned from the call to function dc->hwss.dmdata_status_done. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 371d49e9b745..88a84bfaea6f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -565,7 +565,6 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) { - bool status = true; struct pipe_ctx *pipe = NULL; int i; @@ -581,8 +580,7 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) if (i == MAX_PIPES) return true; - status = dc->hwss.dmdata_status_done(pipe); - return status; + return dc->hwss.dmdata_status_done(pipe); } bool dc_stream_set_dynamic_metadata(struct dc *dc, -- cgit From b805323c31004258ad57736ac6edf8e50d6cc22c Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Wed, 30 Oct 2019 18:07:20 -0400 Subject: drm/amdkfd: Adjust function sequences to avoid unnecessary declarations This is cleaner. 
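The reordering is easier to see in a reduced sketch with hypothetical names (it assumes nothing about the real kfd structures): when the static helpers are defined before the function that takes their addresses, the separate forward declarations at the top of the file become unnecessary and can be dropped.

	struct kq_ops {
		void (*initialize)(void);
		void (*submit_packet)(void);
	};

	/* Helpers are defined first ... */
	static void initialize_v9(void)
	{
		/* hardware-specific setup would go here */
	}

	static void submit_packet_v9(void)
	{
		/* write the updated wptr, ring the doorbell, etc. */
	}

	/* ... so the exported init function can reference them directly,
	 * with no "static void initialize_v9(void);" prototypes needed above. */
	void kq_ops_init_v9(struct kq_ops *ops)
	{
		ops->initialize    = initialize_v9;
		ops->submit_packet = submit_packet_v9;
	}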
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index 9a4bafb2e175..3b5ca2b1d7a6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -26,18 +26,6 @@ #include "kfd_pm4_headers_ai.h" #include "kfd_pm4_opcodes.h" -static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_v9(struct kernel_queue *kq); -static void submit_packet_v9(struct kernel_queue *kq); - -void kernel_queue_init_v9(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_v9; - ops->uninitialize = uninitialize_v9; - ops->submit_packet = submit_packet_v9; -} - static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size) { @@ -67,6 +55,13 @@ static void submit_packet_v9(struct kernel_queue *kq) kq->pending_wptr64); } +void kernel_queue_init_v9(struct kernel_queue_ops *ops) +{ + ops->initialize = initialize_v9; + ops->uninitialize = uninitialize_v9; + ops->submit_packet = submit_packet_v9; +} + static int pm_map_process_v9(struct packet_manager *pm, uint32_t *buffer, struct qcm_process_device *qpd) { -- cgit From d2c6c1077ae2295929f9ee0cfed8c4793a19270e Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Wed, 30 Oct 2019 19:22:11 -0400 Subject: drm/amdkfd: Only keep release_mem function for Hawaii release_mem is only used for Hawaii, but because GFX7 and GFX8 share the same function pointer structure, so we only delete release_mem for GFX9 and GFX10. 
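A minimal sketch of the underlying idea follows, with made-up names and no claim about how the real packet manager invokes the hook: in a per-ASIC function table an optional callback can be left NULL for generations that never need it, provided any shared caller checks the pointer before using it.

	#include <stdint.h>
	#include <stdio.h>

	struct pm_funcs {
		int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
		unsigned int release_mem_size;
	};

	static int hawaii_release_mem(uint64_t gpu_addr, uint32_t *buffer)
	{
		buffer[0] = (uint32_t)gpu_addr;	/* stand-in for real packet building */
		return 1;
	}

	static const struct pm_funcs hawaii_pm_funcs = { hawaii_release_mem, 8 };
	static const struct pm_funcs v9_pm_funcs     = { NULL, 0 };	/* hook never used here */

	static int send_release_mem(const struct pm_funcs *f, uint64_t addr, uint32_t *buf)
	{
		if (!f->release_mem)	/* shared callers must tolerate the missing hook */
			return 0;
		return f->release_mem(addr, buf);
	}

	int main(void)
	{
		uint32_t buf[2];

		printf("hawaii: %d\n", send_release_mem(&hawaii_pm_funcs, 0x1000, buf));
		printf("v9:     %d\n", send_release_mem(&v9_pm_funcs, 0x1000, buf));
		return 0;
	}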
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c | 35 ++--------------------- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 33 ++------------------- 2 files changed, 4 insertions(+), 64 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c index aed32ab7102e..bfd6221acae9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c @@ -298,37 +298,6 @@ static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer, return 0; } - -static int pm_release_mem_v10(uint64_t gpu_addr, uint32_t *buffer) -{ - struct pm4_mec_release_mem *packet; - - WARN_ON(!buffer); - - packet = (struct pm4_mec_release_mem *)buffer; - memset(buffer, 0, sizeof(struct pm4_mec_release_mem)); - - packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, - sizeof(struct pm4_mec_release_mem)); - - packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; - packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe; - packet->bitfields2.tcl1_action_ena = 1; - packet->bitfields2.tc_action_ena = 1; - packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru; - - packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low; - packet->bitfields3.int_sel = - int_sel__mec_release_mem__send_interrupt_after_write_confirm; - - packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; - packet->address_hi = upper_32_bits(gpu_addr); - - packet->data_lo = 0; - - return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int); -} - const struct packet_manager_funcs kfd_v10_pm_funcs = { .map_process = pm_map_process_v10, .runlist = pm_runlist_v10, @@ -336,13 +305,13 @@ const struct packet_manager_funcs kfd_v10_pm_funcs = { .map_queues = pm_map_queues_v10, .unmap_queues = pm_unmap_queues_v10, .query_status = pm_query_status_v10, - .release_mem = pm_release_mem_v10, + .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process), .runlist_size = sizeof(struct pm4_mes_runlist), .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), .query_status_size = sizeof(struct pm4_mes_query_status), - .release_mem_size = sizeof(struct pm4_mec_release_mem) + .release_mem_size = 0, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index 3b5ca2b1d7a6..f0e4910a8865 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -336,35 +336,6 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, return 0; } - -static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer) -{ - struct pm4_mec_release_mem *packet; - - packet = (struct pm4_mec_release_mem *)buffer; - memset(buffer, 0, sizeof(struct pm4_mec_release_mem)); - - packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, - sizeof(struct pm4_mec_release_mem)); - - packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; - packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe; - packet->bitfields2.tcl1_action_ena = 1; - packet->bitfields2.tc_action_ena = 1; - packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru; - - packet->bitfields3.data_sel = 
data_sel__mec_release_mem__send_32_bit_low; - packet->bitfields3.int_sel = - int_sel__mec_release_mem__send_interrupt_after_write_confirm; - - packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; - packet->address_hi = upper_32_bits(gpu_addr); - - packet->data_lo = 0; - - return 0; -} - const struct packet_manager_funcs kfd_v9_pm_funcs = { .map_process = pm_map_process_v9, .runlist = pm_runlist_v9, @@ -372,12 +343,12 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, .query_status = pm_query_status_v9, - .release_mem = pm_release_mem_v9, + .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process), .runlist_size = sizeof(struct pm4_mes_runlist), .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), .query_status_size = sizeof(struct pm4_mes_query_status), - .release_mem_size = sizeof(struct pm4_mec_release_mem) + .release_mem_size = 0, }; -- cgit From 5d4634b5d4456a0c06f93e32616f7f4f6d23eecd Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Wed, 30 Oct 2019 19:22:11 -0400 Subject: drm/amdkfd: Use kernel queue v9 functions for v10 The kernel queue functions for v9 and v10 are the same except pm_map_process_v* which have small difference, so they should be reused. This eliminates the need of reapplying several patches which were applied on v9 but not on v10, such as bigger GWS and more than 2 SDMA engine support which were introduced on Arcturus. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/Makefile | 1 - drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 1 - drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c | 317 ---------------------- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 16 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 - 7 files changed, 14 insertions(+), 333 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 48155060a57c..017a8b7156da 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -41,7 +41,6 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \ $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \ $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_v10.o \ $(AMDKFD_PATH)/kfd_packet_manager.o \ $(AMDKFD_PATH)/kfd_process_queue_manager.o \ $(AMDKFD_PATH)/kfd_device_queue_manager.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 11d244891393..0d966408ea87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -332,12 +332,10 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, case CHIP_RAVEN: case CHIP_RENOIR: case CHIP_ARCTURUS: - kernel_queue_init_v9(&kq->ops_asic_specific); - break; case CHIP_NAVI10: case CHIP_NAVI12: case CHIP_NAVI14: - kernel_queue_init_v10(&kq->ops_asic_specific); + kernel_queue_init_v9(&kq->ops_asic_specific); break; default: WARN(1, "Unexpected ASIC family %u", diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 
365fc674fea4..a7116a939029 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -102,6 +102,5 @@ struct kernel_queue { void kernel_queue_init_cik(struct kernel_queue_ops *ops); void kernel_queue_init_vi(struct kernel_queue_ops *ops); void kernel_queue_init_v9(struct kernel_queue_ops *ops); -void kernel_queue_init_v10(struct kernel_queue_ops *ops); #endif /* KFD_KERNEL_QUEUE_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c deleted file mode 100644 index bfd6221acae9..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "kfd_kernel_queue.h" -#include "kfd_device_queue_manager.h" -#include "kfd_pm4_headers_ai.h" -#include "kfd_pm4_opcodes.h" -#include "gc/gc_10_1_0_sh_mask.h" - -static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_v10(struct kernel_queue *kq); -static void submit_packet_v10(struct kernel_queue *kq); - -void kernel_queue_init_v10(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_v10; - ops->uninitialize = uninitialize_v10; - ops->submit_packet = submit_packet_v10; -} - -static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - int retval; - - retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); - if (retval != 0) - return false; - - kq->eop_gpu_addr = kq->eop_mem->gpu_addr; - kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; - - memset(kq->eop_kernel_addr, 0, PAGE_SIZE); - - return true; -} - -static void uninitialize_v10(struct kernel_queue *kq) -{ - kfd_gtt_sa_free(kq->dev, kq->eop_mem); -} - -static void submit_packet_v10(struct kernel_queue *kq) -{ - *kq->wptr64_kernel = kq->pending_wptr64; - write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, - kq->pending_wptr64); -} - -static int pm_map_process_v10(struct packet_manager *pm, - uint32_t *buffer, struct qcm_process_device *qpd) -{ - struct pm4_mes_map_process *packet; - uint64_t vm_page_table_base_addr = qpd->page_table_base; - - packet = (struct pm4_mes_map_process *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_process)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, - sizeof(struct pm4_mes_map_process)); - packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; - packet->bitfields2.process_quantum = 1; - packet->bitfields2.pasid = qpd->pqm->process->pasid; - packet->bitfields14.gds_size = qpd->gds_size; - packet->bitfields14.num_gws = qpd->num_gws; - packet->bitfields14.num_oac = qpd->num_oac; - packet->bitfields14.sdma_enable = 1; - - packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; - - packet->sh_mem_config = qpd->sh_mem_config; - packet->sh_mem_bases = qpd->sh_mem_bases; - if (qpd->tba_addr) { - packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tba_hi = (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT) | - upper_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); - packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); - } - - packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); - packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); - - packet->vm_context_page_table_base_addr_lo32 = - lower_32_bits(vm_page_table_base_addr); - packet->vm_context_page_table_base_addr_hi32 = - upper_32_bits(vm_page_table_base_addr); - - return 0; -} - -static int pm_runlist_v10(struct packet_manager *pm, uint32_t *buffer, - uint64_t ib, size_t ib_size_in_dwords, bool chain) -{ - struct pm4_mes_runlist *packet; - - int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; - - /* Determine the number of processes to map together to HW: - * it can not exceed the number of VMIDs available to the - * scheduler, and it is determined by the smaller of the number - * of processes in the runlist and kfd module parameter - * hws_max_conc_proc. - * Note: the arbitration between the number of VMIDs and - * hws_max_conc_proc has been done in - * kgd2kfd_device_init(). 
- */ - concurrent_proc_cnt = min(pm->dqm->processes_count, - kfd->max_proc_per_quantum); - - - packet = (struct pm4_mes_runlist *)buffer; - - memset(buffer, 0, sizeof(struct pm4_mes_runlist)); - packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, - sizeof(struct pm4_mes_runlist)); - - packet->bitfields4.ib_size = ib_size_in_dwords; - packet->bitfields4.chain = chain ? 1 : 0; - packet->bitfields4.offload_polling = 0; - packet->bitfields4.valid = 1; - packet->bitfields4.process_cnt = concurrent_proc_cnt; - packet->ordinal2 = lower_32_bits(ib); - packet->ib_base_hi = upper_32_bits(ib); - - return 0; -} - -static int pm_map_queues_v10(struct packet_manager *pm, uint32_t *buffer, - struct queue *q, bool is_static) -{ - struct pm4_mes_map_queues *packet; - bool use_static = is_static; - - packet = (struct pm4_mes_map_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, - sizeof(struct pm4_mes_map_queues)); - packet->bitfields2.num_queues = 1; - packet->bitfields2.queue_sel = - queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; - - packet->bitfields2.engine_sel = - engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_compute_vi; - - switch (q->properties.type) { - case KFD_QUEUE_TYPE_COMPUTE: - if (use_static) - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_latency_static_queue_vi; - break; - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.queue_type = - queue_type__mes_map_queues__debug_interface_queue_vi; - break; - case KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = q->properties.sdma_engine_id + - engine_sel__mes_map_queues__sdma0_vi; - use_static = false; /* no static queues under SDMA */ - break; - default: - WARN(1, "queue type %d\n", q->properties.type); - return -EINVAL; - } - packet->bitfields3.doorbell_offset = - q->properties.doorbell_off; - - packet->mqd_addr_lo = - lower_32_bits(q->gart_mqd_addr); - - packet->mqd_addr_hi = - upper_32_bits(q->gart_mqd_addr); - - packet->wptr_addr_lo = - lower_32_bits((uint64_t)q->properties.write_ptr); - - packet->wptr_addr_hi = - upper_32_bits((uint64_t)q->properties.write_ptr); - - return 0; -} - -static int pm_unmap_queues_v10(struct packet_manager *pm, uint32_t *buffer, - enum kfd_queue_type type, - enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset, - unsigned int sdma_engine) -{ - struct pm4_mes_unmap_queues *packet; - - packet = (struct pm4_mes_unmap_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, - sizeof(struct pm4_mes_unmap_queues)); - switch (type) { - case KFD_QUEUE_TYPE_COMPUTE: - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__compute; - break; - case KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; - break; - default: - WARN(1, "queue type %d\n", type); - break; - } - - if (reset) - packet->bitfields2.action = - action__mes_unmap_queues__reset_queues; - else - packet->bitfields2.action = - action__mes_unmap_queues__preempt_queues; - - switch (filter) { - case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_specified_queues; - packet->bitfields2.num_queues = 1; - packet->bitfields3b.doorbell_offset0 = 
filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_BY_PASID: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; - packet->bitfields3a.pasid = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_queues; - break; - case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: - /* in this case, we do not preempt static queues */ - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_non_static_queues; - break; - default: - WARN(1, "filter %d\n", filter); - break; - } - - return 0; - -} - -static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) -{ - struct pm4_mes_query_status *packet; - - packet = (struct pm4_mes_query_status *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_query_status)); - - - packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, - sizeof(struct pm4_mes_query_status)); - - packet->bitfields2.context_id = 0; - packet->bitfields2.interrupt_sel = - interrupt_sel__mes_query_status__completion_status; - packet->bitfields2.command = - command__mes_query_status__fence_only_after_write_ack; - - packet->addr_hi = upper_32_bits((uint64_t)fence_address); - packet->addr_lo = lower_32_bits((uint64_t)fence_address); - packet->data_hi = upper_32_bits((uint64_t)fence_value); - packet->data_lo = lower_32_bits((uint64_t)fence_value); - - return 0; -} - -const struct packet_manager_funcs kfd_v10_pm_funcs = { - .map_process = pm_map_process_v10, - .runlist = pm_runlist_v10, - .set_resources = pm_set_resources_vi, - .map_queues = pm_map_queues_v10, - .unmap_queues = pm_unmap_queues_v10, - .query_status = pm_query_status_v10, - .release_mem = NULL, - .map_process_size = sizeof(struct pm4_mes_map_process), - .runlist_size = sizeof(struct pm4_mes_runlist), - .set_resources_size = sizeof(struct pm4_mes_set_resources), - .map_queues_size = sizeof(struct pm4_mes_map_queues), - .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), - .query_status_size = sizeof(struct pm4_mes_query_status), - .release_mem_size = 0, -}; - diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index f0e4910a8865..9e0eaf446bab 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -25,6 +25,7 @@ #include "kfd_device_queue_manager.h" #include "kfd_pm4_headers_ai.h" #include "kfd_pm4_opcodes.h" +#include "gc/gc_10_1_0_sh_mask.h" static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size) @@ -85,10 +86,17 @@ static int pm_map_process_v9(struct packet_manager *pm, packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; - packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); - packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); + if (qpd->tba_addr) { + packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); + /* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is + * not defined, so setting it won't do any harm. 
+ */ + packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8) + | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT; + + packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); + packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); + } packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 83ef4b3dd2fb..700be4f80867 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -241,12 +241,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_RAVEN: case CHIP_RENOIR: case CHIP_ARCTURUS: - pm->pmf = &kfd_v9_pm_funcs; - break; case CHIP_NAVI10: case CHIP_NAVI12: case CHIP_NAVI14: - pm->pmf = &kfd_v10_pm_funcs; + pm->pmf = &kfd_v9_pm_funcs; break; default: WARN(1, "Unexpected ASIC family %u", diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 060a9e8b301e..161dbae737b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -972,7 +972,6 @@ struct packet_manager_funcs { extern const struct packet_manager_funcs kfd_vi_pm_funcs; extern const struct packet_manager_funcs kfd_v9_pm_funcs; -extern const struct packet_manager_funcs kfd_v10_pm_funcs; int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); void pm_uninit(struct packet_manager *pm); @@ -991,9 +990,6 @@ void pm_release_ib(struct packet_manager *pm); /* Following PM funcs can be shared among VI and AI */ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size); -int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, - struct scheduling_resources *res); - uint64_t kfd_get_number_elems(struct kfd_dev *kfd); -- cgit From 29453755715cbecd1112e0b30260a5eb2e23f51c Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 15 Jan 2019 18:11:32 -0500 Subject: drm/amdkfd: Simplify the mmap offset related bit operations The new code uses straightforward bit shifts and thus has better readability. 
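A short sketch of the resulting layout (values invented; the snippet only mirrors the encode path in kfd_ioctl_create_queue() and the decode path in kfd_mmap() from the diff below): the type field sits at bit 62 and the gpu_id field at bit 46 of a plain byte offset, so the old "- PAGE_SHIFT" correction baked into the shift constants is no longer needed.

	/* encode: user-visible mmap offset for a doorbell, in bytes */
	uint64_t offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);

	/* decode: vm_pgoff is stored in pages, so convert it back to a byte
	 * offset once, then apply the straightforward masks and shifts
	 */
	unsigned long mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned int  id   = KFD_MMAP_GET_GPU_ID(mmap_offset);
	unsigned long type = mmap_offset & KFD_MMAP_TYPE_MASK;
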
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 17 +++++++---------- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 1 - drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 +++------ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 3 +-- 4 files changed, 11 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 9af45d07515b..568989b7947f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -296,7 +296,6 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Return gpu_id as doorbell offset for mmap usage */ args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); - args->doorbell_offset <<= PAGE_SHIFT; if (KFD_IS_SOC15(dev->device_info->asic_family)) /* On SOC15 ASICs, doorbell allocation must be * per-device, and independent from the per-process @@ -1312,10 +1311,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, /* MMIO is mapped through kfd device * Generate a kfd mmap offset */ - if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) { - args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id); - args->mmap_offset <<= PAGE_SHIFT; - } + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) + args->mmap_offset = KFD_MMAP_TYPE_MMIO + | KFD_MMAP_GPU_ID(args->gpu_id); return 0; @@ -1899,20 +1897,19 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) { struct kfd_process *process; struct kfd_dev *dev = NULL; - unsigned long vm_pgoff; + unsigned long mmap_offset; unsigned int gpu_id; process = kfd_get_process(current); if (IS_ERR(process)) return PTR_ERR(process); - vm_pgoff = vma->vm_pgoff; - vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff); - gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff); + mmap_offset = vma->vm_pgoff << PAGE_SHIFT; + gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset); if (gpu_id) dev = kfd_device_by_id(gpu_id); - switch (vm_pgoff & KFD_MMAP_TYPE_MASK) { + switch (mmap_offset & KFD_MMAP_TYPE_MASK) { case KFD_MMAP_TYPE_DOORBELL: if (!dev) return -ENODEV; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 908081c85de1..1f8365575b12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -346,7 +346,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, ret = create_signal_event(devkfd, p, ev); if (!ret) { *event_page_offset = KFD_MMAP_TYPE_EVENTS; - *event_page_offset <<= PAGE_SHIFT; *event_slot_index = ev->event_id; } break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 161dbae737b6..3f7c3d651a52 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -59,24 +59,21 @@ * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. 
Hence, these * defines are w.r.t to PAGE_SIZE */ -#define KFD_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT) +#define KFD_MMAP_TYPE_SHIFT 62 #define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT) -#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT) +#define KFD_MMAP_GPU_ID_SHIFT 46 #define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \ << KFD_MMAP_GPU_ID_SHIFT) #define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\ & KFD_MMAP_GPU_ID_MASK) -#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \ +#define KFD_MMAP_GET_GPU_ID(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \ >> KFD_MMAP_GPU_ID_SHIFT) -#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT) -#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK) - /* * When working with cp scheduler we should assign the HIQ manually or via * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 10f9af5784f2..8276601a122f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -560,8 +560,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) continue; - offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id)) - << PAGE_SHIFT; + offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); qpd->tba_addr = (int64_t)vm_mmap(filep, 0, KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC, MAP_SHARED, offset); -- cgit From 243a8f41dbc8da54e78e30cb7082f1d807329c42 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 9 Nov 2019 19:49:23 +0000 Subject: drm/amd/display: fix spelling mistake "exeuction" -> "execution" There are spelling mistakes in a DC_ERROR message and a comment. Fix these. Reviewed-by: Nicholas Kazlauskas Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 2 +- drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 61cefe0a3790..b65b66025267 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -92,7 +92,7 @@ void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) - DC_ERROR("Error starting DMUB exeuction: status=%d\n", status); + DC_ERROR("Error starting DMUB execution: status=%d\n", status); } void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index aa8f0396616d..45e427d1952e 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -416,7 +416,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub * @dmub: the dmub service * - * Begins exeuction of queued commands on the dmub. + * Begins execution of queued commands on the dmub. 
* * Return: * DMUB_STATUS_OK - success -- cgit From 4b31b1720704bf058b2f3df1d354ac23a669c6f5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 9 Nov 2019 15:49:21 +0000 Subject: drm/amd/display: remove duplicated comparison expression There is comparison expression that is duplicated and hence one of the expressions can be removed. Remove it. Addresses-Coverity: ("Same on both sides") Fixes: 12e2b2d4c65f ("drm/amd/display: add dcc programming for dual plane") Reviewed-by: Nicholas Kazlauskas Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 1fdba13b3d0f..1fa255e077d0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1491,7 +1491,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa } if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch - || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); -- cgit From cb7709223b39287a020e92ff880d11d377dc53a1 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Mon, 11 Nov 2019 11:45:55 +0800 Subject: drm/amd/powerplay: remove set but not used variable 'vbios_version', 'data' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c: In function smu7_check_mc_firmware: drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:4215:11: warning: variable vbios_version set but not used [-Wunused-but-set-variable] drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c: In function smu7_get_performance_level: drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:5054:21: warning: variable data set but not used [-Wunused-but-set-variable] 'vbios_version' is introduced by commit 599a7e9fe1b6 ("drm/amd/powerplay: implement smu7 hwmgr to manager asics with smu ip version 7."), but never used, so remove it. 'data' is introduced by commit f688b614b643 ("drm/amd/pp: Implement get_performance_level for legacy dgpu"), but never used, so remove it. 
Reviewed-by: Evan Quan Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 390e7524bef4..621cf1ade981 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4225,7 +4225,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t vbios_version; uint32_t tmp; /* Read MC indirect register offset 0x9F bits [3:0] to see @@ -4234,7 +4233,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) */ smu7_get_mc_microcode_version(hwmgr); - vbios_version = hwmgr->microcode_version_info.MC & 0xf; data->need_long_memory_training = false; @@ -5064,13 +5062,11 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw PHM_PerformanceLevel *level) { const struct smu7_power_state *ps; - struct smu7_hwmgr *data; uint32_t i; if (level == NULL || hwmgr == NULL || state == NULL) return -EINVAL; - data = hwmgr->backend; ps = cast_const_phw_smu7_power_state(state); i = index > ps->performance_level_count - 1 ? -- cgit From 4bf321c177c74f7d834956387cd74805c3098322 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Mon, 11 Nov 2019 11:45:56 +0800 Subject: drm/amd/powerplay: remove set but not used variable 'data' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c: In function vega10_get_performance_level: drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c:5217:23: warning: variable data set but not used [-Wunused-but-set-variable] 'data' is introduced by commit f688b614b643 ("drm/amd/pp: Implement get_performance_level for legacy dgpu"), but never used, so remove it. Reviewed-by: Evan Quan Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d71a492c87a3..b29e996df1d4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -5252,13 +5252,11 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ PHM_PerformanceLevel *level) { const struct vega10_power_state *ps; - struct vega10_hwmgr *data; uint32_t i; if (level == NULL || hwmgr == NULL || state == NULL) return -EINVAL; - data = hwmgr->backend; ps = cast_const_phw_vega10_power_state(state); i = index > ps->performance_level_count - 1 ? -- cgit From 39a502c882e5a571041bb2e2b5b2c7f883c6505d Mon Sep 17 00:00:00 2001 From: zhengbin Date: Mon, 11 Nov 2019 17:33:13 +0800 Subject: drm/amd/display: Use static const, not const static Move the static keyword to the front of declarations. 
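Both orderings are legal C, but the C standard lists a storage-class specifier placed anywhere other than first as an obsolescent feature, which is why static checkers flag it. A two-line illustration (the second identifier is invented):

	static const char DC_BUILD_ID[] = "production-build"; /* preferred order */
	const static char old_style[]   = "production-build"; /* still legal, but
	                                   * the storage-class specifier should
	                                   * lead the declaration               */
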
Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 1fa255e077d0..7f796a4c73d4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -69,7 +69,7 @@ #define DC_LOGGER \ dc->ctx->logger -const static char DC_BUILD_ID[] = "production-build"; +static const char DC_BUILD_ID[] = "production-build"; /** * DOC: Overview -- cgit From f5ac1595156a8b63812ed6fa0803ddf7207cced7 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Mon, 11 Nov 2019 12:09:28 +0800 Subject: drm/amd/powerplay: remove set but not used variable 'threshold', 'state' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c: In function fiji_populate_single_graphic_level: drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c:943:11: warning: variable threshold set but not used [-Wunused-but-set-variable] drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c: In function fiji_populate_memory_timing_parameters: drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c:1504:8: warning: variable state set but not used [-Wunused-but-set-variable] They are introduced by commit 2e112b4ae3ba ("drm/amd/pp: remove fiji_smc/smumgr split."), but never used, so remove it. Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index da025b1d302d..32ebb383c456 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -940,7 +940,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, { int result; /* PP_Clocks minClocks; */ - uint32_t threshold, mvdd; + uint32_t mvdd; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -973,8 +973,6 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, level->VoltageDownHyst = 0; level->PowerThrottle = 0; - threshold = clock * data->fast_watermark_threshold / 100; - data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) @@ -1501,7 +1499,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, uint32_t dram_timing; uint32_t dram_timing2; uint32_t burstTime; - ULONG state, trrds, trrdl; + ULONG trrds, trrdl; int result; result = atomctrl_set_engine_dram_timings_rv770(hwmgr, @@ -1513,7 +1511,6 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); - state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0); trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0); trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0); -- cgit From 026674cf055f15da0fa92fbef293d5852346380d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 9 Nov 2019 17:37:25 +0800 Subject: drm/amd/display: remove 
set but not used variable 'ds_port' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c: In function dp_wa_power_up_0010FA: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:2320:35: warning: variable ds_port set but not used [-Wunused-but-set-variable] It is never used, so can be removed. Signed-off-by: YueHaibing Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 65de32fbcc83..b814b749724b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2910,7 +2910,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, int length) { int retry = 0; - union dp_downstream_port_present ds_port = { 0 }; if (!link->dpcd_caps.dpcd_rev.raw) { do { @@ -2923,9 +2922,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); } - ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - - DP_DPCD_REV]; - if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { switch (link->dpcd_caps.branch_dev_id) { /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down -- cgit From 339903fa989b7a0c9fc26e0f35e36df7e110d737 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 15 Jan 2019 13:16:34 -0500 Subject: drm/amdkfd: Use better name to indicate the offset is in dwords The doorbell offset could mean the byte offset or the dword offset, and the 0 offset place is also different, sometimes the start of PCI doorbell bar or the start of process doorbell pages. Use better name to avoid confusion. 
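With the new name it is clearer what the function returns; a worked example (all numbers invented, assuming 8-byte doorbells and the 1024-doorbells-per-process slice described later in this series):

	/* Worked example of kfd_get_doorbell_dw_offset_in_bar():
	 *   doorbell size           = 8 bytes  -> 2 dwords per doorbell
	 *   process slice           = 1024 * 8 = 8192 bytes = 2048 dwords
	 *   doorbell_base_dw_offset = 512   (first 2 KiB of the BAR kept by KGD)
	 *   process->doorbell_index = 3
	 *   doorbell_id             = 5
	 *
	 *   dword offset in BAR = 512 + 3 * 2048 + 5 * 2 = 6666 dwords
	 *   (byte offset 26664; the result is in dwords regardless of the
	 *    ASIC-dependent doorbell size)
	 */
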
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 14 +++++++------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 +++++---- 3 files changed, 13 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 984c2f2b24b6..b42f34ef2b5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) } q->properties.doorbell_off = - kfd_doorbell_id_to_offset(dev, q->process, + kfd_get_doorbell_dw_offset_in_bar(dev, q->process, q->doorbell_id); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index ebe79bf00145..8e0c00b9555e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd) kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address + doorbell_start_offset; - kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32); + kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32); kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base, kfd_doorbell_process_slice(kfd)); @@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd) pr_debug("doorbell base == 0x%08lX\n", (uintptr_t)kfd->doorbell_base); - pr_debug("doorbell_id_offset == 0x%08lX\n", - kfd->doorbell_id_offset); + pr_debug("doorbell_base_dw_offset == 0x%08lX\n", + kfd->doorbell_base_dw_offset); pr_debug("doorbell_process_limit == 0x%08lX\n", doorbell_process_limit); @@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, * Calculating the kernel doorbell offset using the first * doorbell page. */ - *doorbell_off = kfd->doorbell_id_offset + inx; + *doorbell_off = kfd->doorbell_base_dw_offset + inx; pr_debug("Get kernel queue doorbell\n" " doorbell offset == 0x%08X\n" @@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value) } } -unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, +unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, struct kfd_process *process, unsigned int doorbell_id) { /* - * doorbell_id_offset accounts for doorbells taken by KGD. + * doorbell_base_dw_offset accounts for doorbells taken by KGD. * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to * the process's doorbells. The offset returned is in dword * units regardless of the ASIC-dependent doorbell size. */ - return kfd->doorbell_id_offset + + return kfd->doorbell_base_dw_offset + process->doorbell_index * kfd_doorbell_process_slice(kfd) / sizeof(u32) + doorbell_id * kfd->device_info->doorbell_size / sizeof(u32); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 3f7c3d651a52..603d793a8f56 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -235,9 +235,10 @@ struct kfd_dev { * KFD. It is aligned for mapping * into user mode */ - size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell - * to HW doorbell, GFX reserved some - * at the start) + size_t doorbell_base_dw_offset; /* Offset from the start of the PCI + * doorbell BAR to the first KFD + * doorbell in dwords. 
GFX reserves + * the segment before this offset. */ u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells * page used by kernel queue @@ -815,7 +816,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); u32 read_kernel_doorbell(u32 __iomem *db); void write_kernel_doorbell(void __iomem *db, u32 value); void write_kernel_doorbell64(void __iomem *db, u64 value); -unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, +unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, struct kfd_process *process, unsigned int doorbell_id); phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, -- cgit From e47a8b5223033e4dea5e68a228783e3c5ae0b623 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 15 Jan 2019 13:58:57 -0500 Subject: drm/amdkfd: Avoid using doorbell_off as offset in process doorbell pages dorbell_off in the queue properties is mainly used for the doorbell dw offset in pci bar. We should not set it to the doorbell byte offset in process doorbell pages. This makes the code much easier to read. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 12 ++++++------ drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 10 +++++++--- 4 files changed, 16 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 568989b7947f..1041f4d627a6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -258,6 +258,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, unsigned int queue_id; struct kfd_process_device *pdd; struct queue_properties q_properties; + uint32_t doorbell_offset_in_process = 0; memset(&q_properties, 0, sizeof(struct queue_properties)); @@ -286,7 +287,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, p->pasid, dev->id); - err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id); + err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, + &doorbell_offset_in_process); if (err != 0) goto err_create_queue; @@ -297,12 +299,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); if (KFD_IS_SOC15(dev->device_info->asic_family)) - /* On SOC15 ASICs, doorbell allocation must be - * per-device, and independent from the per-process - * queue_id. Return the doorbell offset within the - * doorbell aperture to user mode. + /* On SOC15 ASICs, include the doorbell offset within the + * process doorbell frame, which is 2 pages. 
*/ - args->doorbell_offset |= q_properties.doorbell_off; + args->doorbell_offset |= doorbell_offset_in_process; mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index d59f2cd056c6..1d33c4f25263 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev) properties.type = KFD_QUEUE_TYPE_DIQ; status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL, - &properties, &qid); + &properties, &qid, NULL); if (status) { pr_err("Failed to create DIQ\n"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 603d793a8f56..8ed691fb345a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -902,7 +902,8 @@ int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_dev *dev, struct file *f, struct queue_properties *properties, - unsigned int *qid); + unsigned int *qid, + uint32_t *p_doorbell_offset_in_process); int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid); int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, struct queue_properties *p); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 2659d226c056..8bc69689f9b8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -192,7 +192,8 @@ int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_dev *dev, struct file *f, struct queue_properties *properties, - unsigned int *qid) + unsigned int *qid, + uint32_t *p_doorbell_offset_in_process) { int retval; struct kfd_process_device *pdd; @@ -303,12 +304,15 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - if (q) + if (q && p_doorbell_offset_in_process) /* Return the doorbell offset within the doorbell page * to the caller so it can be passed up to user mode * (in bytes). + * There are always 1024 doorbells per process, so in case + * of 8-byte doorbells, there are two doorbell pages per + * process. */ - properties->doorbell_off = + *p_doorbell_offset_in_process = (q->properties.doorbell_off * sizeof(uint32_t)) & (kfd_doorbell_process_slice(dev) - 1); -- cgit From 2d030d3e97a6da66d9c6fa558e01571ec14a92b0 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 15 Jan 2019 19:23:16 -0500 Subject: drm/amdkfd: Rename create_cp_queue() to init_user_queue() create_cp_queue() could also work with SDMA queues, so we should rename it. It only initialize the data values rather than creating queues. 
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 8bc69689f9b8..1152490bbf53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm) pqm->queue_slot_bitmap = NULL; } -static int create_cp_queue(struct process_queue_manager *pqm, +static int init_user_queue(struct process_queue_manager *pqm, struct kfd_dev *dev, struct queue **q, struct queue_properties *q_properties, struct file *f, unsigned int qid) @@ -251,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, f, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -272,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, f, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; -- cgit From 4d428e912bb2634e6ba3f7ce8309517a12419174 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Nov 2019 21:15:48 -0500 Subject: drm/amdkfd: Implement queue priority controls for gfx10 Ported from gfx9. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 4a236b2c2354..4884cd6c65ce 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -66,6 +66,12 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m->compute_static_thread_mgmt_se3); } +static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) +{ + m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; + m->cp_hqd_queue_priority = q->priority; +} + static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, struct queue_properties *q) { @@ -109,9 +115,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; - m->cp_hqd_pipe_priority = 1; - m->cp_hqd_queue_priority = 15; - if (q->format == KFD_QUEUE_FORMAT_AQL) { m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; @@ -208,6 +211,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, q); + set_priority(m, q); q->is_active = (q->queue_size > 0 && q->queue_address != 0 && -- cgit From 681a9167ddb2d5f5fd9821f5e484e51b31a073bb Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Nov 2019 22:54:07 -0500 Subject: drm/amdkfd: Update get_wave_state() for GFX10 Given control stack is now in the userspace context save restore area on GFX10, the same as GFX8, it is not needed to copy it back to userspace. 
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 4884cd6c65ce..954dc8ac4ff1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -251,18 +251,22 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, { struct v10_compute_mqd *m; - /* Control stack is located one page after MQD. */ - void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); - m = get_mqd(mqd); + /* Control stack is written backwards, while workgroup context data + * is written forwards. Both starts from m->cp_hqd_cntl_stack_size. + * Current position is at m->cp_hqd_cntl_stack_offset and + * m->cp_hqd_wg_state_offset, respectively. + */ *ctl_stack_used_size = m->cp_hqd_cntl_stack_size - m->cp_hqd_cntl_stack_offset; *save_area_used_size = m->cp_hqd_wg_state_offset - m->cp_hqd_cntl_stack_size; - if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) - return -EFAULT; + /* Control stack is not copied to user mode for GFXv10 because + * it's part of the context save area that is already + * accessible to user mode + */ return 0; } -- cgit From bc05b0ec150983ba3b3c1a1c6b5eeb6c2918c94a Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Nov 2019 23:03:29 -0500 Subject: drm/amdkfd: Fix a bug when calculating save_area_used_size workgroup context data writes from m->cp_hqd_cntl_stack_size, so we should deduct it when calculating the used size. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index d3380c5bdbde..be27ff01cdb8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -302,7 +302,8 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, *ctl_stack_used_size = m->cp_hqd_cntl_stack_size - m->cp_hqd_cntl_stack_offset; - *save_area_used_size = m->cp_hqd_wg_state_offset; + *save_area_used_size = m->cp_hqd_wg_state_offset - + m->cp_hqd_cntl_stack_size; if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) return -EFAULT; -- cgit From 2a7f8883f4cafa2189001dec3b3a421e3184bc35 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Nov 2019 21:52:55 -0500 Subject: drm/amdkfd: Use QUEUE_IS_ACTIVE macro in mqd v10 This is done for other GFX in commit bb2d2128a54c4. Port it to GFX10. 
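A worked example with invented register values shows why the control stack size must be deducted:

	/* Invented values, for illustration only:
	 *   cp_hqd_cntl_stack_size   = 0x1000  (control stack region size)
	 *   cp_hqd_cntl_stack_offset = 0x0c00  (stack has grown down by 0x400)
	 *   cp_hqd_wg_state_offset   = 0x1800  (wg data has grown up from 0x1000)
	 *
	 *   ctl_stack_used_size = 0x1000 - 0x0c00 = 0x400
	 *   save_area_used_size = 0x1800 - 0x1000 = 0x800
	 *
	 * Without the fix, save_area_used_size was reported as the raw
	 * cp_hqd_wg_state_offset (0x1800), over-counting by the whole control
	 * stack region.
	 */
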
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 954dc8ac4ff1..46ddb33b624a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -213,10 +213,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, update_cu_mask(mm, mqd, q); set_priority(m, q); - q->is_active = (q->queue_size > 0 && - q->queue_address != 0 && - q->queue_percent > 0 && - !q->is_evicted); + q->is_active = QUEUE_IS_ACTIVE(*q); } static int destroy_mqd(struct mqd_manager *mm, void *mqd, @@ -348,11 +345,7 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, m->sdma_queue_id = q->sdma_queue_id; m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT; - - q->is_active = (q->queue_size > 0 && - q->queue_address != 0 && - q->queue_percent > 0 && - !q->is_evicted); + q->is_active = QUEUE_IS_ACTIVE(*q); } /* -- cgit From 8c27a0c451803ce75a9027bac5138582cee5c335 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Nov 2019 22:06:37 -0500 Subject: drm/amdkfd: Stop using GFP_NOIO explicitly for two places Adapt the change from: 1cd106ecfc1f04 ("drm/amdkfd: Stop using GFP_NOIO explicitly") Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 46ddb33b624a..579c5ffcfa79 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -393,7 +393,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, if (WARN_ON(type >= KFD_MQD_TYPE_MAX)) return NULL; - mqd = kzalloc(sizeof(*mqd), GFP_NOIO); + mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); if (!mqd) return NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index be27ff01cdb8..22a819c888d8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -92,7 +92,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * instead of sub-allocation function. */ if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { - mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); + mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, -- cgit From 025916c914d1efe1e077759d34e4bde25f10ae80 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 12 Nov 2019 10:10:50 +0800 Subject: drm/amd/display: remove set but not used variable 'bpc' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c: In function get_pbn_from_timing: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2364:11: warning: variable bpc set but not used [-Wunused-but-set-variable] It is not used since commit e49f69363adf ("drm/amd/display: use proper formula to calculate bandwidth from timing"), this also remove get_color_depth(), which is only used here. 
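The macro itself is not shown in this patch (it lives in kfd_priv.h, added by the commit referenced above); a sketch consistent with the open-coded test it replaces would be:

	#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
				    (q).queue_address != 0 &&	\
				    (q).queue_percent > 0 &&	\
				    !(q).is_evicted)
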
Signed-off-by: YueHaibing Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index bdc8be373ff0..1be427759771 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2638,28 +2638,13 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) return dc_fixpt_div_int(mbytes_per_sec, 54); } -static int get_color_depth(enum dc_color_depth color_depth) -{ - switch (color_depth) { - case COLOR_DEPTH_666: return 6; - case COLOR_DEPTH_888: return 8; - case COLOR_DEPTH_101010: return 10; - case COLOR_DEPTH_121212: return 12; - case COLOR_DEPTH_141414: return 14; - case COLOR_DEPTH_161616: return 16; - default: return 0; - } -} - static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) { - uint32_t bpc; uint64_t kbps; struct fixed31_32 peak_kbps; uint32_t numerator; uint32_t denominator; - bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth); kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); /* -- cgit From 747a397d394fac0001e4b3c03d7dce3a118af567 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Wed, 13 Nov 2019 20:44:28 +0800 Subject: drm/amdgpu: remove set but not used variable 'mc_shared_chmap' from 'gfx_v6_0.c' and 'gfx_v7_0.c' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c: In function ‘gfx_v6_0_constants_init’: drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c:1579:6: warning: variable ‘mc_shared_chmap’ set but not used [-Wunused-but-set-variable] drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c: In function ‘gfx_v7_0_gpu_early_init’: drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c:4262:6: warning: variable ‘mc_shared_chmap’ set but not used [-Wunused-but-set-variable] Fixes: 2cd46ad22383 ("drm/amdgpu: add graphic pipeline implementation for si v8") Fixes: d93f3ca706b8 ("drm/amdgpu/gfx7: rework gpu_init()") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 3 +-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 7f0a63628c43..31f44d05e606 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1576,7 +1576,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev) static void gfx_v6_0_constants_init(struct amdgpu_device *adev) { u32 gb_addr_config = 0; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 sx_debug_1; u32 hdp_host_path_cntl; u32 tmp; @@ -1678,7 +1678,6 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev) WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 791ba398f007..58b7ef97bff5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4258,7 +4258,7 @@ static int gfx_v7_0_late_init(void *handle) static void gfx_v7_0_gpu_early_init(struct 
amdgpu_device *adev) { u32 gb_addr_config; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; u32 tmp; @@ -4335,7 +4335,6 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) break; } - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; -- cgit From 4f2922d12d6c63d0f4aa4e859ad95aee6d0d4ea0 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Wed, 13 Nov 2019 20:44:29 +0800 Subject: drm/amdgpu: remove set but not used variable 'amdgpu_connector' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/amdgpu_display.c: In function ‘amdgpu_display_crtc_scaling_mode_fixup’: drivers/gpu/drm/amd/amdgpu/amdgpu_display.c:693:27: warning: variable ‘amdgpu_connector’ set but not used [-Wunused-but-set-variable] Fixes: d38ceaf99ed0 ("drm/amdgpu: add core driver (v4)") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index d2dd59a95e8a..6a27027a6f20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -690,7 +690,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_encoder *amdgpu_encoder; struct drm_connector *connector; - struct amdgpu_connector *amdgpu_connector; u32 src_v = 1, dst_v = 1; u32 src_h = 1, dst_h = 1; @@ -702,7 +701,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, continue; amdgpu_encoder = to_amdgpu_encoder(encoder); connector = amdgpu_get_connector_for_encoder(encoder); - amdgpu_connector = to_amdgpu_connector(connector); /* set scaling */ if (amdgpu_encoder->rmx_type == RMX_OFF) -- cgit From a1bd079fca6219e18bb0892f0a7228a76dd6292c Mon Sep 17 00:00:00 2001 From: yu kuai Date: Wed, 13 Nov 2019 20:44:30 +0800 Subject: drm/amdgpu: remove set but not used variable 'count' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdkfd/kfd_device.c: In function ‘kgd2kfd_post_reset’: drivers/gpu/drm/amd/amdkfd/kfd_device.c:745:11: warning: variable ‘count’ set but not used [-Wunused-but-set-variable] 'count' is never used, so can be removed. 
Thus 'atomic_dec_return' can be replaced as 'atomic_dec' Fixes: e42051d2133b ("drm/amdkfd: Implement GPU reset handlers in KFD") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 4fa8834ce7cb..209bfc849352 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -742,7 +742,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { - int ret, count; + int ret; if (!kfd->init_complete) return 0; @@ -750,7 +750,7 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) ret = kfd_resume(kfd); if (ret) return ret; - count = atomic_dec_return(&kfd_locked); + atomic_dec(&kfd_locked); atomic_set(&kfd->sram_ecc_flag, 0); -- cgit From 9e089a29c696d86d26e79737bafbce94738fb462 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Wed, 13 Nov 2019 20:44:31 +0800 Subject: drm/amdgpu: remove set but not used variable 'invalid' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function ‘amdgpu_amdkfd_evict_userptr’: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:1665:6: warning: variable ‘invalid’ set but not used [-Wunused-but-set-variable] 'invalid' is never used, so can be removed. Thus 'atomic_inc_return' can be replaced as 'atomic_inc' Fixes: 5ae0283e831a ("drm/amdgpu: Add userptr support for KFD") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index ae6f5446262c..a1ed8a8e3752 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1662,10 +1662,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) { struct amdkfd_process_info *process_info = mem->process_info; - int invalid, evicted_bos; + int evicted_bos; int r = 0; - invalid = atomic_inc_return(&mem->invalid); + atomic_inc(&mem->invalid); evicted_bos = atomic_inc_return(&process_info->evicted_bos); if (evicted_bos == 1) { /* First eviction, stop the queues */ -- cgit From 472b36a2ab67880e89d6b0cd0e243830e8cb75e1 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Wed, 13 Nov 2019 20:44:34 +0800 Subject: drm/amd/powerplay: remove set but not used variable 'us_mvdd' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c: In function ‘vegam_populate_smc_acpi_level’: drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c:1117:11: warning: variable 'us_mvdd' set but not used [-Wunused-but-set-variable] It is never used, so can be removed. 
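The reasoning behind the two atomic_* cleanups earlier in this series (dropping atomic_dec_return()/atomic_inc_return() when the returned value only fed an unused variable) can be seen in a small userspace analogue below, written against C11 <stdatomic.h> rather than the kernel atomic_t API; all names are illustrative. One caveat the patches leave implicit: in the kernel the *_return() forms also imply full memory ordering while the plain forms do not, so the substitution is only appropriate when neither the returned value nor that ordering is relied upon.

/*
 * Userspace analogue of the "drop the unread return value" pattern.
 * kfd_locked_example is a stand-in for the counter touched in the patch;
 * it is not the kernel variable.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int kfd_locked_example = 3;

static void post_reset_example(void)
{
	/* Before: count = atomic_fetch_sub(...) - 1;  with count never read. */
	/* After: just decrement; nothing consumes the result. */
	atomic_fetch_sub_explicit(&kfd_locked_example, 1, memory_order_relaxed);
}

int main(void)
{
	post_reset_example();
	printf("kfd_locked_example = %d\n", atomic_load(&kfd_locked_example));
	return 0;
}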
Fixes: ac7822b0026f ("drm/amd/powerplay: add smumgr support for VEGAM (v2)") Signed-off-by: yu kuai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index ae18fbcb26fb..2068eb00d2f8 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -1114,7 +1114,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, (struct phm_ppt_v1_information *)(hwmgr->pptable); SMIO_Pattern vol_level; uint32_t mvdd; - uint16_t us_mvdd; table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; @@ -1168,17 +1167,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, "in Clock Dependency Table", ); - us_mvdd = 0; - if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || - (data->mclk_dpm_key_disabled)) - us_mvdd = data->vbios_boot_state.mvdd_bootup_value; - else { - if (!vegam_populate_mvdd_value(hwmgr, - data->dpm_table.mclk_table.dpm_levels[0].value, - &vol_level)) - us_mvdd = vol_level.Voltage; - } - if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level)) table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); else -- cgit From 84ce6c48675b55e937814e7c688c46ab15242b8a Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Thu, 7 Nov 2019 23:18:04 -0500 Subject: drm/amdkfd: Merge CIK kernel queue functions into VI The only difference that CIK kernel queue functions are different from VI is avoid allocating eop_mem. We can achieve that by using a if condition. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/Makefile | 1 - drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 7 +-- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 1 - drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 53 ----------------------- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 7 +++ 5 files changed, 9 insertions(+), 60 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 017a8b7156da..f93a16372325 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -38,7 +38,6 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_mqd_manager_v9.o \ $(AMDKFD_PATH)/kfd_mqd_manager_v10.o \ $(AMDKFD_PATH)/kfd_kernel_queue.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \ $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \ $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \ $(AMDKFD_PATH)/kfd_packet_manager.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 0d966408ea87..a750b1d110eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -311,6 +311,8 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, kq->ops.rollback_packet = rollback_packet; switch (dev->device_info->asic_family) { + case CHIP_KAVERI: + case CHIP_HAWAII: case CHIP_CARRIZO: case CHIP_TONGA: case CHIP_FIJI: @@ -321,11 +323,6 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, kernel_queue_init_vi(&kq->ops_asic_specific); break; - case CHIP_KAVERI: - case CHIP_HAWAII: - kernel_queue_init_cik(&kq->ops_asic_specific); - break; - case CHIP_VEGA10: case CHIP_VEGA12: case 
CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index a7116a939029..a9a35897d8b7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -99,7 +99,6 @@ struct kernel_queue { struct list_head list; }; -void kernel_queue_init_cik(struct kernel_queue_ops *ops); void kernel_queue_init_vi(struct kernel_queue_ops *ops); void kernel_queue_init_v9(struct kernel_queue_ops *ops); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c deleted file mode 100644 index 19e54acb4125..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "kfd_kernel_queue.h" - -static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_cik(struct kernel_queue *kq); -static void submit_packet_cik(struct kernel_queue *kq); - -void kernel_queue_init_cik(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_cik; - ops->uninitialize = uninitialize_cik; - ops->submit_packet = submit_packet_cik; -} - -static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - return true; -} - -static void uninitialize_cik(struct kernel_queue *kq) -{ -} - -static void submit_packet_cik(struct kernel_queue *kq) -{ - *kq->wptr_kernel = kq->pending_wptr; - write_kernel_doorbell(kq->queue->properties.doorbell_ptr, - kq->pending_wptr); -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c index 2adaf40027eb..5f5c8d73570d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c @@ -43,6 +43,10 @@ static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, { int retval; + /*For CIK family asics, kq->eop_mem is not needed */ + if (dev->device_info->asic_family <= CHIP_MULLINS) + return true; + retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); if (retval != 0) return false; @@ -57,6 +61,9 @@ static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, static void uninitialize_vi(struct kernel_queue *kq) { + /* For CIK family asics, kq->eop_mem is Null, kfd_gtt_sa_free() + * is able to handle NULL properly. + */ kfd_gtt_sa_free(kq->dev, kq->eop_mem); } -- cgit From ccdef35d0714f620ca70b23b6000cbb630efea1b Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Thu, 7 Nov 2019 23:59:43 -0500 Subject: drm/amdkfd: Eliminate ops_asic_specific in kernel queue The ops_asic_specific function pointers are actually quite generic after using a simple if condition. Eliminate it by code refactoring. 
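As a complement to the diff that follows, here is a minimal sketch of the pattern the message above describes: the per-ASIC ops table goes away and the one real difference between the callbacks, the doorbell width, is handled by a direct if on a device property. Types and field names below are illustrative assumptions, not the kfd structures; the values assumed for doorbell_size (4 on VI-class parts, 8 on newer ones) follow the branch taken in the patch.

#include <stdint.h>

struct example_dev_info {
	unsigned int doorbell_size;	/* assumed: 4 on VI-class parts, 8 on v9+ */
};

struct example_kq {
	const struct example_dev_info *info;
	volatile uint32_t *wptr32;
	volatile uint64_t *wptr64;
	uint32_t pending32;
	uint64_t pending64;
};

/* One generic helper stands in for submit_packet_vi()/submit_packet_v9(). */
static void example_submit_packet(struct example_kq *kq)
{
	if (kq->info->doorbell_size == 8)
		*kq->wptr64 = kq->pending64;	/* 64-bit write pointer path */
	else
		*kq->wptr32 = kq->pending32;	/* 32-bit write pointer path */
	/* The real code also rings the doorbell; omitted in this sketch. */
}

int main(void)
{
	uint32_t w32 = 0;
	uint64_t w64 = 0;
	struct example_dev_info vi = { .doorbell_size = 4 };
	struct example_kq kq = { .info = &vi, .wptr32 = &w32, .wptr64 = &w64,
				 .pending32 = 42, .pending64 = 42 };

	example_submit_packet(&kq);	/* takes the 32-bit branch for VI */
	return (int)w32;		/* 42 */
}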
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 63 ++++++++++-------------- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 4 -- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 36 -------------- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 48 ------------------ 4 files changed, 26 insertions(+), 125 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index a750b1d110eb..59ee9053498c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -87,9 +87,17 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->pq_kernel_addr = kq->pq->cpu_ptr; kq->pq_gpu_addr = kq->pq->gpu_addr; - retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size); - if (!retval) - goto err_eop_allocate_vidmem; + /* For CIK family asics, kq->eop_mem is not needed */ + if (dev->device_info->asic_family > CHIP_HAWAII) { + retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); + if (retval != 0) + goto err_eop_allocate_vidmem; + + kq->eop_gpu_addr = kq->eop_mem->gpu_addr; + kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; + + memset(kq->eop_kernel_addr, 0, PAGE_SIZE); + } retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel), &kq->rptr_mem); @@ -200,7 +208,12 @@ static void uninitialize(struct kernel_queue *kq) kfd_gtt_sa_free(kq->dev, kq->rptr_mem); kfd_gtt_sa_free(kq->dev, kq->wptr_mem); - kq->ops_asic_specific.uninitialize(kq); + + /* For CIK family asics, kq->eop_mem is Null, kfd_gtt_sa_free() + * is able to handle NULL properly. + */ + kfd_gtt_sa_free(kq->dev, kq->eop_mem); + kfd_gtt_sa_free(kq->dev, kq->pq); kfd_release_kernel_doorbell(kq->dev, kq->queue->properties.doorbell_ptr); @@ -280,8 +293,15 @@ static void submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif - - kq->ops_asic_specific.submit_packet(kq); + if (kq->dev->device_info->doorbell_size == 8) { + *kq->wptr64_kernel = kq->pending_wptr64; + write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, + kq->pending_wptr64); + } else { + *kq->wptr_kernel = kq->pending_wptr; + write_kernel_doorbell(kq->queue->properties.doorbell_ptr, + kq->pending_wptr); + } } static void rollback_packet(struct kernel_queue *kq) @@ -310,42 +330,11 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, kq->ops.submit_packet = submit_packet; kq->ops.rollback_packet = rollback_packet; - switch (dev->device_info->asic_family) { - case CHIP_KAVERI: - case CHIP_HAWAII: - case CHIP_CARRIZO: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - kernel_queue_init_vi(&kq->ops_asic_specific); - break; - - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - kernel_queue_init_v9(&kq->ops_asic_specific); - break; - default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - goto out_free; - } - if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) return kq; pr_err("Failed to init kernel queue\n"); -out_free: kfree(kq); return NULL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index a9a35897d8b7..475e9499c0af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -66,7 +66,6 @@ struct kernel_queue_ops { struct kernel_queue { struct kernel_queue_ops ops; - struct kernel_queue_ops ops_asic_specific; /* data */ struct kfd_dev *dev; @@ -99,7 +98,4 @@ struct kernel_queue { struct list_head list; }; -void kernel_queue_init_vi(struct kernel_queue_ops *ops); -void kernel_queue_init_v9(struct kernel_queue_ops *ops); - #endif /* KFD_KERNEL_QUEUE_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index 9e0eaf446bab..2de01009f1b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -27,42 +27,6 @@ #include "kfd_pm4_opcodes.h" #include "gc/gc_10_1_0_sh_mask.h" -static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - int retval; - - retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); - if (retval) - return false; - - kq->eop_gpu_addr = kq->eop_mem->gpu_addr; - kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; - - memset(kq->eop_kernel_addr, 0, PAGE_SIZE); - - return true; -} - -static void uninitialize_v9(struct kernel_queue *kq) -{ - kfd_gtt_sa_free(kq->dev, kq->eop_mem); -} - -static void submit_packet_v9(struct kernel_queue *kq) -{ - *kq->wptr64_kernel = kq->pending_wptr64; - write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, - kq->pending_wptr64); -} - -void kernel_queue_init_v9(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_v9; - ops->uninitialize = uninitialize_v9; - ops->submit_packet = submit_packet_v9; -} - static int pm_map_process_v9(struct packet_manager *pm, uint32_t *buffer, struct qcm_process_device *qpd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c index 5f5c8d73570d..bed4d0ccb6b1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c @@ -26,54 +26,6 @@ #include "kfd_pm4_headers_vi.h" #include "kfd_pm4_opcodes.h" -static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_vi(struct kernel_queue *kq); -static void submit_packet_vi(struct kernel_queue *kq); - -void kernel_queue_init_vi(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_vi; - ops->uninitialize = uninitialize_vi; - ops->submit_packet = submit_packet_vi; -} - -static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - int retval; - - /*For CIK family asics, kq->eop_mem is not needed */ - if (dev->device_info->asic_family <= CHIP_MULLINS) - return true; - - retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); - if (retval != 0) - return false; - - kq->eop_gpu_addr = kq->eop_mem->gpu_addr; - kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; - - memset(kq->eop_kernel_addr, 0, PAGE_SIZE); - - return true; -} - -static void uninitialize_vi(struct kernel_queue *kq) -{ - /* For CIK family asics, kq->eop_mem is Null, kfd_gtt_sa_free() - * is able to handle NULL properly. 
- */ - kfd_gtt_sa_free(kq->dev, kq->eop_mem); -} - -static void submit_packet_vi(struct kernel_queue *kq) -{ - *kq->wptr_kernel = kq->pending_wptr; - write_kernel_doorbell(kq->queue->properties.doorbell_ptr, - kq->pending_wptr); -} - unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size) { union PM4_MES_TYPE_3_HEADER header; -- cgit From 594d0c90a48e9e661564d3584cda0a7c363e1611 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Wed, 13 Nov 2019 17:03:11 -0500 Subject: drm/amdkfd: Rename kfd_kernel_queue_*.c to kfd_packet_manager_*.c After the recent cleanup, the functionalities provided by the previous kfd_kernel_queue_*.c are actually all packet manager related. So rename them to reflect that. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/Makefile | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 326 -------------------- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 334 --------------------- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 326 ++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 334 +++++++++++++++++++++ 5 files changed, 662 insertions(+), 662 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index f93a16372325..61474627a32c 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -38,9 +38,9 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_mqd_manager_v9.o \ $(AMDKFD_PATH)/kfd_mqd_manager_v10.o \ $(AMDKFD_PATH)/kfd_kernel_queue.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \ $(AMDKFD_PATH)/kfd_packet_manager.o \ + $(AMDKFD_PATH)/kfd_packet_manager_vi.o \ + $(AMDKFD_PATH)/kfd_packet_manager_v9.o \ $(AMDKFD_PATH)/kfd_process_queue_manager.o \ $(AMDKFD_PATH)/kfd_device_queue_manager.o \ $(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c deleted file mode 100644 index 2de01009f1b6..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Copyright 2016-2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "kfd_kernel_queue.h" -#include "kfd_device_queue_manager.h" -#include "kfd_pm4_headers_ai.h" -#include "kfd_pm4_opcodes.h" -#include "gc/gc_10_1_0_sh_mask.h" - -static int pm_map_process_v9(struct packet_manager *pm, - uint32_t *buffer, struct qcm_process_device *qpd) -{ - struct pm4_mes_map_process *packet; - uint64_t vm_page_table_base_addr = qpd->page_table_base; - - packet = (struct pm4_mes_map_process *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_process)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, - sizeof(struct pm4_mes_map_process)); - packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; - packet->bitfields2.process_quantum = 1; - packet->bitfields2.pasid = qpd->pqm->process->pasid; - packet->bitfields14.gds_size = qpd->gds_size & 0x3F; - packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF; - packet->bitfields14.num_gws = qpd->num_gws; - packet->bitfields14.num_oac = qpd->num_oac; - packet->bitfields14.sdma_enable = 1; - packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; - - packet->sh_mem_config = qpd->sh_mem_config; - packet->sh_mem_bases = qpd->sh_mem_bases; - if (qpd->tba_addr) { - packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); - /* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is - * not defined, so setting it won't do any harm. - */ - packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8) - | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT; - - packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); - packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); - } - - packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); - packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); - - packet->vm_context_page_table_base_addr_lo32 = - lower_32_bits(vm_page_table_base_addr); - packet->vm_context_page_table_base_addr_hi32 = - upper_32_bits(vm_page_table_base_addr); - - return 0; -} - -static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, - uint64_t ib, size_t ib_size_in_dwords, bool chain) -{ - struct pm4_mes_runlist *packet; - - int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; - - /* Determine the number of processes to map together to HW: - * it can not exceed the number of VMIDs available to the - * scheduler, and it is determined by the smaller of the number - * of processes in the runlist and kfd module parameter - * hws_max_conc_proc. - * Note: the arbitration between the number of VMIDs and - * hws_max_conc_proc has been done in - * kgd2kfd_device_init(). - */ - concurrent_proc_cnt = min(pm->dqm->processes_count, - kfd->max_proc_per_quantum); - - packet = (struct pm4_mes_runlist *)buffer; - - memset(buffer, 0, sizeof(struct pm4_mes_runlist)); - packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, - sizeof(struct pm4_mes_runlist)); - - packet->bitfields4.ib_size = ib_size_in_dwords; - packet->bitfields4.chain = chain ? 1 : 0; - packet->bitfields4.offload_polling = 0; - packet->bitfields4.chained_runlist_idle_disable = chain ? 
1 : 0; - packet->bitfields4.valid = 1; - packet->bitfields4.process_cnt = concurrent_proc_cnt; - packet->ordinal2 = lower_32_bits(ib); - packet->ib_base_hi = upper_32_bits(ib); - - return 0; -} - -static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer, - struct scheduling_resources *res) -{ - struct pm4_mes_set_resources *packet; - - packet = (struct pm4_mes_set_resources *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_set_resources)); - - packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES, - sizeof(struct pm4_mes_set_resources)); - - packet->bitfields2.queue_type = - queue_type__mes_set_resources__hsa_interface_queue_hiq; - packet->bitfields2.vmid_mask = res->vmid_mask; - packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; - packet->bitfields7.oac_mask = res->oac_mask; - packet->bitfields8.gds_heap_base = res->gds_heap_base; - packet->bitfields8.gds_heap_size = res->gds_heap_size; - - packet->gws_mask_lo = lower_32_bits(res->gws_mask); - packet->gws_mask_hi = upper_32_bits(res->gws_mask); - - packet->queue_mask_lo = lower_32_bits(res->queue_mask); - packet->queue_mask_hi = upper_32_bits(res->queue_mask); - - return 0; -} - -static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, - struct queue *q, bool is_static) -{ - struct pm4_mes_map_queues *packet; - bool use_static = is_static; - - packet = (struct pm4_mes_map_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, - sizeof(struct pm4_mes_map_queues)); - packet->bitfields2.num_queues = 1; - packet->bitfields2.queue_sel = - queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; - - packet->bitfields2.engine_sel = - engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.gws_control_queue = q->gws ? 
1 : 0; - packet->bitfields2.extended_engine_sel = - extended_engine_sel__mes_map_queues__legacy_engine_sel; - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_compute_vi; - - switch (q->properties.type) { - case KFD_QUEUE_TYPE_COMPUTE: - if (use_static) - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_latency_static_queue_vi; - break; - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.queue_type = - queue_type__mes_map_queues__debug_interface_queue_vi; - break; - case KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - use_static = false; /* no static queues under SDMA */ - if (q->properties.sdma_engine_id < 2) - packet->bitfields2.engine_sel = q->properties.sdma_engine_id + - engine_sel__mes_map_queues__sdma0_vi; - else { - packet->bitfields2.extended_engine_sel = - extended_engine_sel__mes_map_queues__sdma0_to_7_sel; - packet->bitfields2.engine_sel = q->properties.sdma_engine_id; - } - break; - default: - WARN(1, "queue type %d", q->properties.type); - return -EINVAL; - } - packet->bitfields3.doorbell_offset = - q->properties.doorbell_off; - - packet->mqd_addr_lo = - lower_32_bits(q->gart_mqd_addr); - - packet->mqd_addr_hi = - upper_32_bits(q->gart_mqd_addr); - - packet->wptr_addr_lo = - lower_32_bits((uint64_t)q->properties.write_ptr); - - packet->wptr_addr_hi = - upper_32_bits((uint64_t)q->properties.write_ptr); - - return 0; -} - -static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, - enum kfd_queue_type type, - enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset, - unsigned int sdma_engine) -{ - struct pm4_mes_unmap_queues *packet; - - packet = (struct pm4_mes_unmap_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, - sizeof(struct pm4_mes_unmap_queues)); - switch (type) { - case KFD_QUEUE_TYPE_COMPUTE: - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.extended_engine_sel = - extended_engine_sel__mes_unmap_queues__legacy_engine_sel; - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__compute; - break; - case KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - if (sdma_engine < 2) { - packet->bitfields2.extended_engine_sel = - extended_engine_sel__mes_unmap_queues__legacy_engine_sel; - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; - } else { - packet->bitfields2.extended_engine_sel = - extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel; - packet->bitfields2.engine_sel = sdma_engine; - } - break; - default: - WARN(1, "queue type %d", type); - return -EINVAL; - } - - if (reset) - packet->bitfields2.action = - action__mes_unmap_queues__reset_queues; - else - packet->bitfields2.action = - action__mes_unmap_queues__preempt_queues; - - switch (filter) { - case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_specified_queues; - packet->bitfields2.num_queues = 1; - packet->bitfields3b.doorbell_offset0 = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_BY_PASID: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; - packet->bitfields3a.pasid = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_queues; - break; - case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: - /* in this case, we do not preempt static queues */ - packet->bitfields2.queue_sel = - 
queue_sel__mes_unmap_queues__unmap_all_non_static_queues; - break; - default: - WARN(1, "filter %d", filter); - return -EINVAL; - } - - return 0; - -} - -static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) -{ - struct pm4_mes_query_status *packet; - - packet = (struct pm4_mes_query_status *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_query_status)); - - - packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, - sizeof(struct pm4_mes_query_status)); - - packet->bitfields2.context_id = 0; - packet->bitfields2.interrupt_sel = - interrupt_sel__mes_query_status__completion_status; - packet->bitfields2.command = - command__mes_query_status__fence_only_after_write_ack; - - packet->addr_hi = upper_32_bits((uint64_t)fence_address); - packet->addr_lo = lower_32_bits((uint64_t)fence_address); - packet->data_hi = upper_32_bits((uint64_t)fence_value); - packet->data_lo = lower_32_bits((uint64_t)fence_value); - - return 0; -} - -const struct packet_manager_funcs kfd_v9_pm_funcs = { - .map_process = pm_map_process_v9, - .runlist = pm_runlist_v9, - .set_resources = pm_set_resources_v9, - .map_queues = pm_map_queues_v9, - .unmap_queues = pm_unmap_queues_v9, - .query_status = pm_query_status_v9, - .release_mem = NULL, - .map_process_size = sizeof(struct pm4_mes_map_process), - .runlist_size = sizeof(struct pm4_mes_runlist), - .set_resources_size = sizeof(struct pm4_mes_set_resources), - .map_queues_size = sizeof(struct pm4_mes_map_queues), - .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), - .query_status_size = sizeof(struct pm4_mes_query_status), - .release_mem_size = 0, -}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c deleted file mode 100644 index bed4d0ccb6b1..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "kfd_kernel_queue.h" -#include "kfd_device_queue_manager.h" -#include "kfd_pm4_headers_vi.h" -#include "kfd_pm4_opcodes.h" - -unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size) -{ - union PM4_MES_TYPE_3_HEADER header; - - header.u32All = 0; - header.opcode = opcode; - header.count = packet_size / 4 - 2; - header.type = PM4_TYPE_3; - - return header.u32All; -} - -static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer, - struct qcm_process_device *qpd) -{ - struct pm4_mes_map_process *packet; - - packet = (struct pm4_mes_map_process *)buffer; - - memset(buffer, 0, sizeof(struct pm4_mes_map_process)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, - sizeof(struct pm4_mes_map_process)); - packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; - packet->bitfields2.process_quantum = 1; - packet->bitfields2.pasid = qpd->pqm->process->pasid; - packet->bitfields3.page_table_base = qpd->page_table_base; - packet->bitfields10.gds_size = qpd->gds_size; - packet->bitfields10.num_gws = qpd->num_gws; - packet->bitfields10.num_oac = qpd->num_oac; - packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; - - packet->sh_mem_config = qpd->sh_mem_config; - packet->sh_mem_bases = qpd->sh_mem_bases; - packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; - packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; - - packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base; - - packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); - packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); - - return 0; -} - -static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, - uint64_t ib, size_t ib_size_in_dwords, bool chain) -{ - struct pm4_mes_runlist *packet; - int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; - - if (WARN_ON(!ib)) - return -EFAULT; - - /* Determine the number of processes to map together to HW: - * it can not exceed the number of VMIDs available to the - * scheduler, and it is determined by the smaller of the number - * of processes in the runlist and kfd module parameter - * hws_max_conc_proc. - * Note: the arbitration between the number of VMIDs and - * hws_max_conc_proc has been done in - * kgd2kfd_device_init(). - */ - concurrent_proc_cnt = min(pm->dqm->processes_count, - kfd->max_proc_per_quantum); - - packet = (struct pm4_mes_runlist *)buffer; - - memset(buffer, 0, sizeof(struct pm4_mes_runlist)); - packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, - sizeof(struct pm4_mes_runlist)); - - packet->bitfields4.ib_size = ib_size_in_dwords; - packet->bitfields4.chain = chain ? 
1 : 0; - packet->bitfields4.offload_polling = 0; - packet->bitfields4.valid = 1; - packet->bitfields4.process_cnt = concurrent_proc_cnt; - packet->ordinal2 = lower_32_bits(ib); - packet->bitfields3.ib_base_hi = upper_32_bits(ib); - - return 0; -} - -int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, - struct scheduling_resources *res) -{ - struct pm4_mes_set_resources *packet; - - packet = (struct pm4_mes_set_resources *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_set_resources)); - - packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES, - sizeof(struct pm4_mes_set_resources)); - - packet->bitfields2.queue_type = - queue_type__mes_set_resources__hsa_interface_queue_hiq; - packet->bitfields2.vmid_mask = res->vmid_mask; - packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; - packet->bitfields7.oac_mask = res->oac_mask; - packet->bitfields8.gds_heap_base = res->gds_heap_base; - packet->bitfields8.gds_heap_size = res->gds_heap_size; - - packet->gws_mask_lo = lower_32_bits(res->gws_mask); - packet->gws_mask_hi = upper_32_bits(res->gws_mask); - - packet->queue_mask_lo = lower_32_bits(res->queue_mask); - packet->queue_mask_hi = upper_32_bits(res->queue_mask); - - return 0; -} - -static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer, - struct queue *q, bool is_static) -{ - struct pm4_mes_map_queues *packet; - bool use_static = is_static; - - packet = (struct pm4_mes_map_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, - sizeof(struct pm4_mes_map_queues)); - packet->bitfields2.num_queues = 1; - packet->bitfields2.queue_sel = - queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; - - packet->bitfields2.engine_sel = - engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_compute_vi; - - switch (q->properties.type) { - case KFD_QUEUE_TYPE_COMPUTE: - if (use_static) - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_latency_static_queue_vi; - break; - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.queue_type = - queue_type__mes_map_queues__debug_interface_queue_vi; - break; - case KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = q->properties.sdma_engine_id + - engine_sel__mes_map_queues__sdma0_vi; - use_static = false; /* no static queues under SDMA */ - break; - default: - WARN(1, "queue type %d", q->properties.type); - return -EINVAL; - } - packet->bitfields3.doorbell_offset = - q->properties.doorbell_off; - - packet->mqd_addr_lo = - lower_32_bits(q->gart_mqd_addr); - - packet->mqd_addr_hi = - upper_32_bits(q->gart_mqd_addr); - - packet->wptr_addr_lo = - lower_32_bits((uint64_t)q->properties.write_ptr); - - packet->wptr_addr_hi = - upper_32_bits((uint64_t)q->properties.write_ptr); - - return 0; -} - -static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, - enum kfd_queue_type type, - enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset, - unsigned int sdma_engine) -{ - struct pm4_mes_unmap_queues *packet; - - packet = (struct pm4_mes_unmap_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, - sizeof(struct pm4_mes_unmap_queues)); - switch (type) { - case KFD_QUEUE_TYPE_COMPUTE: - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__compute; - break; - case 
KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; - break; - default: - WARN(1, "queue type %d", type); - return -EINVAL; - } - - if (reset) - packet->bitfields2.action = - action__mes_unmap_queues__reset_queues; - else - packet->bitfields2.action = - action__mes_unmap_queues__preempt_queues; - - switch (filter) { - case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_specified_queues; - packet->bitfields2.num_queues = 1; - packet->bitfields3b.doorbell_offset0 = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_BY_PASID: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; - packet->bitfields3a.pasid = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_queues; - break; - case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: - /* in this case, we do not preempt static queues */ - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_non_static_queues; - break; - default: - WARN(1, "filter %d", filter); - return -EINVAL; - } - - return 0; - -} - -static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) -{ - struct pm4_mes_query_status *packet; - - packet = (struct pm4_mes_query_status *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_query_status)); - - packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, - sizeof(struct pm4_mes_query_status)); - - packet->bitfields2.context_id = 0; - packet->bitfields2.interrupt_sel = - interrupt_sel__mes_query_status__completion_status; - packet->bitfields2.command = - command__mes_query_status__fence_only_after_write_ack; - - packet->addr_hi = upper_32_bits((uint64_t)fence_address); - packet->addr_lo = lower_32_bits((uint64_t)fence_address); - packet->data_hi = upper_32_bits((uint64_t)fence_value); - packet->data_lo = lower_32_bits((uint64_t)fence_value); - - return 0; -} - -static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer) -{ - struct pm4_mec_release_mem *packet; - - packet = (struct pm4_mec_release_mem *)buffer; - memset(buffer, 0, sizeof(*packet)); - - packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, - sizeof(*packet)); - - packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; - packet->bitfields2.event_index = event_index___release_mem__end_of_pipe; - packet->bitfields2.tcl1_action_ena = 1; - packet->bitfields2.tc_action_ena = 1; - packet->bitfields2.cache_policy = cache_policy___release_mem__lru; - packet->bitfields2.atc = 0; - - packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low; - packet->bitfields3.int_sel = - int_sel___release_mem__send_interrupt_after_write_confirm; - - packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; - packet->address_hi = upper_32_bits(gpu_addr); - - packet->data_lo = 0; - - return 0; -} - -const struct packet_manager_funcs kfd_vi_pm_funcs = { - .map_process = pm_map_process_vi, - .runlist = pm_runlist_vi, - .set_resources = pm_set_resources_vi, - .map_queues = pm_map_queues_vi, - .unmap_queues = pm_unmap_queues_vi, - .query_status = pm_query_status_vi, - .release_mem = pm_release_mem_vi, - .map_process_size = sizeof(struct pm4_mes_map_process), - .runlist_size = sizeof(struct pm4_mes_runlist), - .set_resources_size = sizeof(struct pm4_mes_set_resources), - .map_queues_size = 
sizeof(struct pm4_mes_map_queues), - .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), - .query_status_size = sizeof(struct pm4_mes_query_status), - .release_mem_size = sizeof(struct pm4_mec_release_mem) -}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c new file mode 100644 index 000000000000..2de01009f1b6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -0,0 +1,326 @@ +/* + * Copyright 2016-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "kfd_kernel_queue.h" +#include "kfd_device_queue_manager.h" +#include "kfd_pm4_headers_ai.h" +#include "kfd_pm4_opcodes.h" +#include "gc/gc_10_1_0_sh_mask.h" + +static int pm_map_process_v9(struct packet_manager *pm, + uint32_t *buffer, struct qcm_process_device *qpd) +{ + struct pm4_mes_map_process *packet; + uint64_t vm_page_table_base_addr = qpd->page_table_base; + + packet = (struct pm4_mes_map_process *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_map_process)); + + packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, + sizeof(struct pm4_mes_map_process)); + packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; + packet->bitfields2.process_quantum = 1; + packet->bitfields2.pasid = qpd->pqm->process->pasid; + packet->bitfields14.gds_size = qpd->gds_size & 0x3F; + packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF; + packet->bitfields14.num_gws = qpd->num_gws; + packet->bitfields14.num_oac = qpd->num_oac; + packet->bitfields14.sdma_enable = 1; + packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + + packet->sh_mem_config = qpd->sh_mem_config; + packet->sh_mem_bases = qpd->sh_mem_bases; + if (qpd->tba_addr) { + packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); + /* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is + * not defined, so setting it won't do any harm. 
+ */ + packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8) + | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT; + + packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); + packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); + } + + packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); + packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); + + packet->vm_context_page_table_base_addr_lo32 = + lower_32_bits(vm_page_table_base_addr); + packet->vm_context_page_table_base_addr_hi32 = + upper_32_bits(vm_page_table_base_addr); + + return 0; +} + +static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, + uint64_t ib, size_t ib_size_in_dwords, bool chain) +{ + struct pm4_mes_runlist *packet; + + int concurrent_proc_cnt = 0; + struct kfd_dev *kfd = pm->dqm->dev; + + /* Determine the number of processes to map together to HW: + * it can not exceed the number of VMIDs available to the + * scheduler, and it is determined by the smaller of the number + * of processes in the runlist and kfd module parameter + * hws_max_conc_proc. + * Note: the arbitration between the number of VMIDs and + * hws_max_conc_proc has been done in + * kgd2kfd_device_init(). + */ + concurrent_proc_cnt = min(pm->dqm->processes_count, + kfd->max_proc_per_quantum); + + packet = (struct pm4_mes_runlist *)buffer; + + memset(buffer, 0, sizeof(struct pm4_mes_runlist)); + packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, + sizeof(struct pm4_mes_runlist)); + + packet->bitfields4.ib_size = ib_size_in_dwords; + packet->bitfields4.chain = chain ? 1 : 0; + packet->bitfields4.offload_polling = 0; + packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0; + packet->bitfields4.valid = 1; + packet->bitfields4.process_cnt = concurrent_proc_cnt; + packet->ordinal2 = lower_32_bits(ib); + packet->ib_base_hi = upper_32_bits(ib); + + return 0; +} + +static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res) +{ + struct pm4_mes_set_resources *packet; + + packet = (struct pm4_mes_set_resources *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_set_resources)); + + packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES, + sizeof(struct pm4_mes_set_resources)); + + packet->bitfields2.queue_type = + queue_type__mes_set_resources__hsa_interface_queue_hiq; + packet->bitfields2.vmid_mask = res->vmid_mask; + packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; + packet->bitfields7.oac_mask = res->oac_mask; + packet->bitfields8.gds_heap_base = res->gds_heap_base; + packet->bitfields8.gds_heap_size = res->gds_heap_size; + + packet->gws_mask_lo = lower_32_bits(res->gws_mask); + packet->gws_mask_hi = upper_32_bits(res->gws_mask); + + packet->queue_mask_lo = lower_32_bits(res->queue_mask); + packet->queue_mask_hi = upper_32_bits(res->queue_mask); + + return 0; +} + +static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, + struct queue *q, bool is_static) +{ + struct pm4_mes_map_queues *packet; + bool use_static = is_static; + + packet = (struct pm4_mes_map_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); + + packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, + sizeof(struct pm4_mes_map_queues)); + packet->bitfields2.num_queues = 1; + packet->bitfields2.queue_sel = + queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; + + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__compute_vi; + packet->bitfields2.gws_control_queue = q->gws ? 
1 : 0; + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_map_queues__legacy_engine_sel; + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_compute_vi; + + switch (q->properties.type) { + case KFD_QUEUE_TYPE_COMPUTE: + if (use_static) + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_latency_static_queue_vi; + break; + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.queue_type = + queue_type__mes_map_queues__debug_interface_queue_vi; + break; + case KFD_QUEUE_TYPE_SDMA: + case KFD_QUEUE_TYPE_SDMA_XGMI: + use_static = false; /* no static queues under SDMA */ + if (q->properties.sdma_engine_id < 2) + packet->bitfields2.engine_sel = q->properties.sdma_engine_id + + engine_sel__mes_map_queues__sdma0_vi; + else { + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_map_queues__sdma0_to_7_sel; + packet->bitfields2.engine_sel = q->properties.sdma_engine_id; + } + break; + default: + WARN(1, "queue type %d", q->properties.type); + return -EINVAL; + } + packet->bitfields3.doorbell_offset = + q->properties.doorbell_off; + + packet->mqd_addr_lo = + lower_32_bits(q->gart_mqd_addr); + + packet->mqd_addr_hi = + upper_32_bits(q->gart_mqd_addr); + + packet->wptr_addr_lo = + lower_32_bits((uint64_t)q->properties.write_ptr); + + packet->wptr_addr_hi = + upper_32_bits((uint64_t)q->properties.write_ptr); + + return 0; +} + +static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, + enum kfd_queue_type type, + enum kfd_unmap_queues_filter filter, + uint32_t filter_param, bool reset, + unsigned int sdma_engine) +{ + struct pm4_mes_unmap_queues *packet; + + packet = (struct pm4_mes_unmap_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); + + packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, + sizeof(struct pm4_mes_unmap_queues)); + switch (type) { + case KFD_QUEUE_TYPE_COMPUTE: + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_unmap_queues__legacy_engine_sel; + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__compute; + break; + case KFD_QUEUE_TYPE_SDMA: + case KFD_QUEUE_TYPE_SDMA_XGMI: + if (sdma_engine < 2) { + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_unmap_queues__legacy_engine_sel; + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__sdma0 + sdma_engine; + } else { + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel; + packet->bitfields2.engine_sel = sdma_engine; + } + break; + default: + WARN(1, "queue type %d", type); + return -EINVAL; + } + + if (reset) + packet->bitfields2.action = + action__mes_unmap_queues__reset_queues; + else + packet->bitfields2.action = + action__mes_unmap_queues__preempt_queues; + + switch (filter) { + case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_specified_queues; + packet->bitfields2.num_queues = 1; + packet->bitfields3b.doorbell_offset0 = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_BY_PASID: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; + packet->bitfields3a.pasid = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__unmap_all_queues; + break; + case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: + /* in this case, we do not preempt static queues */ + packet->bitfields2.queue_sel = + 
queue_sel__mes_unmap_queues__unmap_all_non_static_queues; + break; + default: + WARN(1, "filter %d", filter); + return -EINVAL; + } + + return 0; + +} + +static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, + uint64_t fence_address, uint32_t fence_value) +{ + struct pm4_mes_query_status *packet; + + packet = (struct pm4_mes_query_status *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_query_status)); + + + packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, + sizeof(struct pm4_mes_query_status)); + + packet->bitfields2.context_id = 0; + packet->bitfields2.interrupt_sel = + interrupt_sel__mes_query_status__completion_status; + packet->bitfields2.command = + command__mes_query_status__fence_only_after_write_ack; + + packet->addr_hi = upper_32_bits((uint64_t)fence_address); + packet->addr_lo = lower_32_bits((uint64_t)fence_address); + packet->data_hi = upper_32_bits((uint64_t)fence_value); + packet->data_lo = lower_32_bits((uint64_t)fence_value); + + return 0; +} + +const struct packet_manager_funcs kfd_v9_pm_funcs = { + .map_process = pm_map_process_v9, + .runlist = pm_runlist_v9, + .set_resources = pm_set_resources_v9, + .map_queues = pm_map_queues_v9, + .unmap_queues = pm_unmap_queues_v9, + .query_status = pm_query_status_v9, + .release_mem = NULL, + .map_process_size = sizeof(struct pm4_mes_map_process), + .runlist_size = sizeof(struct pm4_mes_runlist), + .set_resources_size = sizeof(struct pm4_mes_set_resources), + .map_queues_size = sizeof(struct pm4_mes_map_queues), + .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .query_status_size = sizeof(struct pm4_mes_query_status), + .release_mem_size = 0, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c new file mode 100644 index 000000000000..bed4d0ccb6b1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -0,0 +1,334 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "kfd_kernel_queue.h" +#include "kfd_device_queue_manager.h" +#include "kfd_pm4_headers_vi.h" +#include "kfd_pm4_opcodes.h" + +unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size) +{ + union PM4_MES_TYPE_3_HEADER header; + + header.u32All = 0; + header.opcode = opcode; + header.count = packet_size / 4 - 2; + header.type = PM4_TYPE_3; + + return header.u32All; +} + +static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer, + struct qcm_process_device *qpd) +{ + struct pm4_mes_map_process *packet; + + packet = (struct pm4_mes_map_process *)buffer; + + memset(buffer, 0, sizeof(struct pm4_mes_map_process)); + + packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, + sizeof(struct pm4_mes_map_process)); + packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; + packet->bitfields2.process_quantum = 1; + packet->bitfields2.pasid = qpd->pqm->process->pasid; + packet->bitfields3.page_table_base = qpd->page_table_base; + packet->bitfields10.gds_size = qpd->gds_size; + packet->bitfields10.num_gws = qpd->num_gws; + packet->bitfields10.num_oac = qpd->num_oac; + packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + + packet->sh_mem_config = qpd->sh_mem_config; + packet->sh_mem_bases = qpd->sh_mem_bases; + packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; + packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; + + packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base; + + packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); + packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); + + return 0; +} + +static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, + uint64_t ib, size_t ib_size_in_dwords, bool chain) +{ + struct pm4_mes_runlist *packet; + int concurrent_proc_cnt = 0; + struct kfd_dev *kfd = pm->dqm->dev; + + if (WARN_ON(!ib)) + return -EFAULT; + + /* Determine the number of processes to map together to HW: + * it can not exceed the number of VMIDs available to the + * scheduler, and it is determined by the smaller of the number + * of processes in the runlist and kfd module parameter + * hws_max_conc_proc. + * Note: the arbitration between the number of VMIDs and + * hws_max_conc_proc has been done in + * kgd2kfd_device_init(). + */ + concurrent_proc_cnt = min(pm->dqm->processes_count, + kfd->max_proc_per_quantum); + + packet = (struct pm4_mes_runlist *)buffer; + + memset(buffer, 0, sizeof(struct pm4_mes_runlist)); + packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, + sizeof(struct pm4_mes_runlist)); + + packet->bitfields4.ib_size = ib_size_in_dwords; + packet->bitfields4.chain = chain ? 
1 : 0; + packet->bitfields4.offload_polling = 0; + packet->bitfields4.valid = 1; + packet->bitfields4.process_cnt = concurrent_proc_cnt; + packet->ordinal2 = lower_32_bits(ib); + packet->bitfields3.ib_base_hi = upper_32_bits(ib); + + return 0; +} + +int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res) +{ + struct pm4_mes_set_resources *packet; + + packet = (struct pm4_mes_set_resources *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_set_resources)); + + packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES, + sizeof(struct pm4_mes_set_resources)); + + packet->bitfields2.queue_type = + queue_type__mes_set_resources__hsa_interface_queue_hiq; + packet->bitfields2.vmid_mask = res->vmid_mask; + packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; + packet->bitfields7.oac_mask = res->oac_mask; + packet->bitfields8.gds_heap_base = res->gds_heap_base; + packet->bitfields8.gds_heap_size = res->gds_heap_size; + + packet->gws_mask_lo = lower_32_bits(res->gws_mask); + packet->gws_mask_hi = upper_32_bits(res->gws_mask); + + packet->queue_mask_lo = lower_32_bits(res->queue_mask); + packet->queue_mask_hi = upper_32_bits(res->queue_mask); + + return 0; +} + +static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer, + struct queue *q, bool is_static) +{ + struct pm4_mes_map_queues *packet; + bool use_static = is_static; + + packet = (struct pm4_mes_map_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); + + packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, + sizeof(struct pm4_mes_map_queues)); + packet->bitfields2.num_queues = 1; + packet->bitfields2.queue_sel = + queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; + + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__compute_vi; + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_compute_vi; + + switch (q->properties.type) { + case KFD_QUEUE_TYPE_COMPUTE: + if (use_static) + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_latency_static_queue_vi; + break; + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.queue_type = + queue_type__mes_map_queues__debug_interface_queue_vi; + break; + case KFD_QUEUE_TYPE_SDMA: + case KFD_QUEUE_TYPE_SDMA_XGMI: + packet->bitfields2.engine_sel = q->properties.sdma_engine_id + + engine_sel__mes_map_queues__sdma0_vi; + use_static = false; /* no static queues under SDMA */ + break; + default: + WARN(1, "queue type %d", q->properties.type); + return -EINVAL; + } + packet->bitfields3.doorbell_offset = + q->properties.doorbell_off; + + packet->mqd_addr_lo = + lower_32_bits(q->gart_mqd_addr); + + packet->mqd_addr_hi = + upper_32_bits(q->gart_mqd_addr); + + packet->wptr_addr_lo = + lower_32_bits((uint64_t)q->properties.write_ptr); + + packet->wptr_addr_hi = + upper_32_bits((uint64_t)q->properties.write_ptr); + + return 0; +} + +static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, + enum kfd_queue_type type, + enum kfd_unmap_queues_filter filter, + uint32_t filter_param, bool reset, + unsigned int sdma_engine) +{ + struct pm4_mes_unmap_queues *packet; + + packet = (struct pm4_mes_unmap_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); + + packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, + sizeof(struct pm4_mes_unmap_queues)); + switch (type) { + case KFD_QUEUE_TYPE_COMPUTE: + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__compute; + break; + case 
KFD_QUEUE_TYPE_SDMA: + case KFD_QUEUE_TYPE_SDMA_XGMI: + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__sdma0 + sdma_engine; + break; + default: + WARN(1, "queue type %d", type); + return -EINVAL; + } + + if (reset) + packet->bitfields2.action = + action__mes_unmap_queues__reset_queues; + else + packet->bitfields2.action = + action__mes_unmap_queues__preempt_queues; + + switch (filter) { + case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_specified_queues; + packet->bitfields2.num_queues = 1; + packet->bitfields3b.doorbell_offset0 = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_BY_PASID: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; + packet->bitfields3a.pasid = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__unmap_all_queues; + break; + case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: + /* in this case, we do not preempt static queues */ + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__unmap_all_non_static_queues; + break; + default: + WARN(1, "filter %d", filter); + return -EINVAL; + } + + return 0; + +} + +static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, + uint64_t fence_address, uint32_t fence_value) +{ + struct pm4_mes_query_status *packet; + + packet = (struct pm4_mes_query_status *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_query_status)); + + packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, + sizeof(struct pm4_mes_query_status)); + + packet->bitfields2.context_id = 0; + packet->bitfields2.interrupt_sel = + interrupt_sel__mes_query_status__completion_status; + packet->bitfields2.command = + command__mes_query_status__fence_only_after_write_ack; + + packet->addr_hi = upper_32_bits((uint64_t)fence_address); + packet->addr_lo = lower_32_bits((uint64_t)fence_address); + packet->data_hi = upper_32_bits((uint64_t)fence_value); + packet->data_lo = lower_32_bits((uint64_t)fence_value); + + return 0; +} + +static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer) +{ + struct pm4_mec_release_mem *packet; + + packet = (struct pm4_mec_release_mem *)buffer; + memset(buffer, 0, sizeof(*packet)); + + packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, + sizeof(*packet)); + + packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; + packet->bitfields2.event_index = event_index___release_mem__end_of_pipe; + packet->bitfields2.tcl1_action_ena = 1; + packet->bitfields2.tc_action_ena = 1; + packet->bitfields2.cache_policy = cache_policy___release_mem__lru; + packet->bitfields2.atc = 0; + + packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low; + packet->bitfields3.int_sel = + int_sel___release_mem__send_interrupt_after_write_confirm; + + packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; + packet->address_hi = upper_32_bits(gpu_addr); + + packet->data_lo = 0; + + return 0; +} + +const struct packet_manager_funcs kfd_vi_pm_funcs = { + .map_process = pm_map_process_vi, + .runlist = pm_runlist_vi, + .set_resources = pm_set_resources_vi, + .map_queues = pm_map_queues_vi, + .unmap_queues = pm_unmap_queues_vi, + .query_status = pm_query_status_vi, + .release_mem = pm_release_mem_vi, + .map_process_size = sizeof(struct pm4_mes_map_process), + .runlist_size = sizeof(struct pm4_mes_runlist), + .set_resources_size = sizeof(struct pm4_mes_set_resources), + .map_queues_size = 
sizeof(struct pm4_mes_map_queues), + .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .query_status_size = sizeof(struct pm4_mes_query_status), + .release_mem_size = sizeof(struct pm4_mec_release_mem) +}; -- cgit From baaeb610b17a3419fa31a5c70a2bb84528a6eaa7 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 13 Nov 2019 22:24:12 +0800 Subject: drm/amdgpu: enable ras capablity check on arcturus check hw ras capablity via atomfirmware Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Reviewed-by: John Clements Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 404483437bd3..1593564578b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1687,7 +1687,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, *supported = 0; if (amdgpu_sriov_vf(adev) || - adev->asic_type != CHIP_VEGA20) + (adev->asic_type != CHIP_VEGA20 && + adev->asic_type != CHIP_ARCTURUS)) return; if (adev->is_atom_fw && -- cgit From 9e612c11a758087a1acbc1cc1d84f2deaf496b34 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 13 Nov 2019 22:26:22 +0800 Subject: drm/amdgpu: init umc functions for arcturus umc ras reuse vg20 umc functions for arcturus umc ras Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Reviewed-by: John Clements Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 9f2a893871ec..ee615d050837 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -639,6 +639,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.funcs = &umc_v6_0_funcs; break; case CHIP_VEGA20: + case CHIP_ARCTURUS: adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; @@ -752,6 +753,7 @@ static int gmc_v9_0_late_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA20: + case CHIP_ARCTURUS: r = amdgpu_atomfirmware_mem_ecc_supported(adev); if (!r) { DRM_INFO("ECC is not present.\n"); -- cgit From 0bb419c76b31506194a0ba9447f84fc177d83910 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 29 Oct 2019 16:59:09 +0800 Subject: drm/amdgpu/gfx10: fix mqd backup/restore for gfx rings (v2) 1. no need to allocate an extra member for 'mqd_backup' array 2. 
backup/restore mqd to/from the correct 'mqd_backup' array slot v2: warning fix (Alex) Signed-off-by: Xiaojie Yuan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index c9d1fada6188..f063a5c7bd8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -455,7 +455,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) ring = &adev->gfx.kiq.ring; if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) - kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]); + kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS - 1]); kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index a74ecd449775..0ae0a2715b0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -225,7 +225,7 @@ struct amdgpu_me { uint32_t num_me; uint32_t num_pipe_per_me; uint32_t num_queue_per_pipe; - void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1]; + void *mqd_backup[AMDGPU_MAX_GFX_RINGS]; /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index a93dd3dc0902..b1280120845f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3114,6 +3114,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v10_gfx_mqd *mqd = ring->mqd_ptr; + int mqd_idx = ring - &adev->gfx.gfx_ring[0]; if (!adev->in_gpu_reset && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); @@ -3125,12 +3126,12 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) #endif nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]) - memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd)); + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else if (adev->in_gpu_reset) { /* reset mqd with the backup copy */ - if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]) - memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd)); + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; adev->wb.wb[ring->wptr_offs] = 0; -- cgit From 88a1c40a04de534353fe7389654557aa886bc445 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 10:00:24 -0500 Subject: drm/amdgpu: add JPEG HW IP and SW structures It will be used for JPEG IP 1.0, 2.0, 2.5 and later. 
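Illustrative note (not part of the patch): in the structures added below, the shared "internal" register offset is the value embedded in ring packets, while each instance's "external" offset is what the CPU uses for direct MMIO access; the JPEG ring test added later in this series uses the two fields exactly that way. A minimal sketch of that split, condensed from the test code:

    /* CPU-side MMIO write goes through the per-instance external offset */
    WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
    /* ring packets reference the shared internal offset */
    amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0));
    amdgpu_ring_write(ring, 0xDEADBEEF);
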
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 46 ++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bcc5d40a8d5f..89499501ecec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -69,6 +69,7 @@ #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "amdgpu_vcn.h" +#include "amdgpu_jpeg.h" #include "amdgpu_mn.h" #include "amdgpu_gmc.h" #include "amdgpu_gfx.h" @@ -704,6 +705,7 @@ enum amd_hw_ip_block_type { MP1_HWIP, UVD_HWIP, VCN_HWIP = UVD_HWIP, + JPEG_HWIP = VCN_HWIP, VCE_HWIP, DF_HWIP, DCE_HWIP, @@ -899,6 +901,9 @@ struct amdgpu_device { /* vcn */ struct amdgpu_vcn vcn; + /* jpeg */ + struct amdgpu_jpeg jpeg; + /* firmwares */ struct amdgpu_firmware firmware; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h new file mode 100644 index 000000000000..36e2b7340c97 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -0,0 +1,46 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_JPEG_H__ +#define __AMDGPU_JPEG_H__ + +#define AMDGPU_MAX_JPEG_INSTANCES 2 + +struct amdgpu_jpeg_reg{ + unsigned jpeg_pitch; +}; + +struct amdgpu_jpeg_inst { + struct amdgpu_ring ring_dec; + struct amdgpu_irq_src irq; + struct amdgpu_jpeg_reg external; +}; + +struct amdgpu_jpeg { + uint8_t num_jpeg_inst; + struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; + struct amdgpu_jpeg_reg internal; + unsigned harvest_config; +}; + +#endif /*__AMDGPU_JPEG_H__*/ -- cgit From 9d9cc9b8fe85e7c7cdfc58b61830ca38f770cab1 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 10:07:56 -0500 Subject: drm/amdgpu: add amdgpu_jpeg and JPEG tests It will be used for all versions of JPEG eventually. Previous JPEG tests will be removed later since they are still used by JPEG2.x. 
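Usage note (illustrative, not part of the patch): the two helpers added below are intended to be wired into a JPEG ring's function table as its test callbacks, which the JPEG1.0 patch later in this series does. A minimal sketch; the table name here is a placeholder:

    static const struct amdgpu_ring_funcs example_jpeg_dec_ring_funcs = {
        .type      = AMDGPU_RING_TYPE_VCN_JPEG,
        /* ...rptr/wptr, emit and other callbacks elided... */
        .test_ring = amdgpu_jpeg_dec_ring_test_ring,
        .test_ib   = amdgpu_jpeg_dec_ring_test_ib,
    };
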
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 135 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 3 + 3 files changed, 141 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index ca0e435559d5..b6c9d54266dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -147,12 +147,13 @@ amdgpu-y += \ vce_v3_0.o \ vce_v4_0.o -# add VCN block +# add VCN and JPEG block amdgpu-y += \ amdgpu_vcn.o \ vcn_v1_0.o \ vcn_v2_0.o \ - vcn_v2_5.o + vcn_v2_5.o \ + amdgpu_jpeg.o # add ATHUB block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c new file mode 100644 index 000000000000..d9a547d4d3b2 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -0,0 +1,135 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "soc15d.h" +#include "soc15_common.h" + +int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); + r = amdgpu_ring_alloc(ring, 3); + if (r) + return r; + + amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + return r; +} + +static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, + struct dma_fence **fence) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + struct amdgpu_ib *ib; + struct dma_fence *f = NULL; + const unsigned ib_size_dw = 16; + int i, r; + + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + if (r) + return r; + + ib = &job->ibs[0]; + + ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); + ib->ptr[1] = 0xDEADBEEF; + for (i = 2; i < 16; i += 2) { + ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); + ib->ptr[i+1] = 0; + } + ib->length_dw = 16; + + r = amdgpu_job_submit_direct(job, ring, &f); + if (r) + goto err; + + if (fence) + *fence = dma_fence_get(f); + dma_fence_put(f); + + return 0; + +err: + amdgpu_job_free(job); + return r; +} + +int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + struct dma_fence *fence = NULL; + long r = 0; + + r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence); + if (r) + goto error; + + r = dma_fence_wait_timeout(fence, false, timeout); + if (r == 0) { + r = -ETIMEDOUT; + goto error; + } else if (r < 0) { + goto error; + } else { + r = 0; + } + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + dma_fence_put(fence); +error: + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 36e2b7340c97..a8d988c25f45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -43,4 +43,7 @@ struct amdgpu_jpeg { unsigned harvest_config; }; +int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring); +int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); + #endif /*__AMDGPU_JPEG_H__*/ -- cgit From bb0db70f3f75e0d994a627f22c3ff2d8ec6191c4 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 10:17:06 -0500 Subject: drm/amdgpu: separate JPEG1.0 code out from VCN1.0 For VCN1.0, the separation is just in code wise, JPEG1.0 HW is still included in the VCN1.0 HW. 
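To make the delegation concrete (summarized from the diff below, not new code), the VCN1.0 IP callbacks now call into the new jpeg_v1_0.c entry points instead of carrying the JPEG ring code themselves:

    jpeg_v1_0_early_init(handle);   /* from vcn_v1_0_early_init(): JPEG ring/irq function pointers */
    r = jpeg_v1_0_sw_init(handle);  /* from vcn_v1_0_sw_init(): JPEG trap irq + ring init */
    jpeg_v1_0_sw_fini(handle);      /* from vcn_v1_0_sw_fini() */
    jpeg_v1_0_start(adev, 0);       /* from vcn_v1_0_start_spg_mode() */
    jpeg_v1_0_start(adev, 1);       /* from vcn_v1_0_start_dpg_mode() */
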
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 584 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h | 32 ++ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 481 +-------------------------- 4 files changed, 630 insertions(+), 470 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index b6c9d54266dc..b6d4b227590a 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -153,7 +153,8 @@ amdgpu-y += \ vcn_v1_0.o \ vcn_v2_0.o \ vcn_v2_5.o \ - amdgpu_jpeg.o + amdgpu_jpeg.o \ + jpeg_v1_0.o # add ATHUB block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c new file mode 100644 index 000000000000..553506df077d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -0,0 +1,584 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "soc15.h" +#include "soc15d.h" + +#include "vcn/vcn_1_0_offset.h" +#include "vcn/vcn_1_0_sh_mask.h" + +static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev); + +static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val) +{ + struct amdgpu_device *adev = ring->adev; + ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + ring->ring[(*ptr)++] = 0; + ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); + } else { + ring->ring[(*ptr)++] = reg_offset; + ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); + } + ring->ring[(*ptr)++] = val; +} + +static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) +{ + struct amdgpu_device *adev = ring->adev; + + uint32_t reg, reg_offset, val, mask, i; + + // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW); + reg_offset = (reg << 2); + val = lower_32_bits(ring->gpu_addr); + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH); + reg_offset = (reg << 2); + val = upper_32_bits(ring->gpu_addr); + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 3rd to 5th: issue MEM_READ commands + for (i = 0; i <= 2; i++) { + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2); + ring->ring[ptr++] = 0; + } + + // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x13; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 7th: program mmUVD_JRBC_RB_REF_DATA + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA); + reg_offset = (reg << 2); + val = 0x1; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x1; + mask = 0x1; + + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0); + ring->ring[ptr++] = 0x01400200; + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0); + ring->ring[ptr++] = val; + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + ring->ring[ptr++] = 0; + ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3); + } else { + ring->ring[ptr++] = reg_offset; + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3); + } + ring->ring[ptr++] = mask; + + //9th to 21st: insert no-op + for (i = 0; i <= 12; i++) { + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); + ring->ring[ptr++] = 0; + } + + //22nd: reset mmUVD_JRBC_RB_RPTR + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_RPTR); + reg_offset = (reg << 2); + val = 0; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + //23rd: program 
mmUVD_JRBC_RB_CNTL to disable no_fetch + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x12; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); +} + +/** + * jpeg_v1_0_decode_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v1_0_decode_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v1_0_decode_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v1_0_decode_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v1_0_decode_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v1_0_decode_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); +} + +/** + * jpeg_v1_0_decode_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command to the ring. + */ +static void jpeg_v1_0_decode_ring_insert_start(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80010000); +} + +/** + * jpeg_v1_0_decode_ring_insert_end - insert a end command + * + * @ring: amdgpu_ring pointer + * + * Write a end command to the ring. + */ +static void jpeg_v1_0_decode_ring_insert_end(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00010000); +} + +/** + * jpeg_v1_0_decode_ring_emit_fence - emit an fence & trap command + * + * @ring: amdgpu_ring pointer + * @fence: fence to emit + * + * Write a fence and a trap command to the ring. 
+ */ +static void jpeg_v1_0_decode_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + struct amdgpu_device *adev = ring->adev; + + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0xffffffff); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x3fbc); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x1); + + /* emit trap */ + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); + amdgpu_ring_write(ring, 0); +} + +/** + * jpeg_v1_0_decode_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @ib: indirect buffer to execute + * + * Write ring commands to execute the indirect buffer. 
+ */ +static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, ib->length_dw); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); + amdgpu_ring_write(ring, 0x2); +} + +static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val, + uint32_t mask) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, val); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE3)); + } + amdgpu_ring_write(ring, mask); +} + +static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + jpeg_v1_0_decode_ring_emit_reg_wait(ring, data0, data1, mask); +} + +static void jpeg_v1_0_decode_ring_emit_wreg(struct 
amdgpu_ring *ring, + uint32_t reg, uint32_t val) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + } + amdgpu_ring_write(ring, val); +} + +static void jpeg_v1_0_decode_ring_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } +} + +static int jpeg_v1_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: JPEG decode TRAP\n"); + + switch (entry->src_id) { + case 126: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +/** + * jpeg_v1_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +int jpeg_v1_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->jpeg.num_jpeg_inst = 1; + + jpeg_v1_0_set_dec_ring_funcs(adev); + jpeg_v1_0_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v1_0_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + */ +int jpeg_v1_0_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->jpeg.inst->irq); + if (r) + return r; + + ring = &adev->jpeg.inst->ring_dec; + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch = + SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v1_0_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG free up sw allocation + */ +void jpeg_v1_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec); +} + +/** + * jpeg_v1_0_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +void jpeg_v1_0_start(struct amdgpu_device *adev, int mode) +{ + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + + if (mode == 0) { + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | + UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0); + } WREG32_SOC15(JPEG, 0, 
mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + + /* initialize wptr */ + ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); + + /* copy patch commands to the jpeg ring */ + jpeg_v1_0_decode_ring_set_patch_ring(ring, + (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); +} + +static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .nop = PACKET0(0x81ff, 0), + .support_64bit_ptrs = false, + .no_user_fence = true, + .vmhub = AMDGPU_MMHUB_0, + .extra_dw = 64, + .get_rptr = jpeg_v1_0_decode_ring_get_rptr, + .get_wptr = jpeg_v1_0_decode_ring_get_wptr, + .set_wptr = jpeg_v1_0_decode_ring_set_wptr, + .emit_frame_size = + 6 + 6 + /* hdp invalidate / flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v1_0_decode_ring_emit_vm_flush */ + 26 + 26 + /* jpeg_v1_0_decode_ring_emit_fence x2 vm fence */ + 6, + .emit_ib_size = 22, /* jpeg_v1_0_decode_ring_emit_ib */ + .emit_ib = jpeg_v1_0_decode_ring_emit_ib, + .emit_fence = jpeg_v1_0_decode_ring_emit_fence, + .emit_vm_flush = jpeg_v1_0_decode_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v1_0_decode_ring_nop, + .insert_start = jpeg_v1_0_decode_ring_insert_start, + .insert_end = jpeg_v1_0_decode_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg, + .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs; + DRM_INFO("JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = { + .set = jpeg_v1_0_set_interrupt_state, + .process = jpeg_v1_0_process_interrupt, +}; + +static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h new file mode 100644 index 000000000000..bbf33a6a3972 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V1_0_H__ +#define __JPEG_V1_0_H__ + +int jpeg_v1_0_early_init(void *handle); +int jpeg_v1_0_sw_init(void *handle); +void jpeg_v1_0_sw_fini(void *handle); +void jpeg_v1_0_start(struct amdgpu_device *adev, int mode); + +#endif /*__JPEG_V1_0_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index b4f84a820a44..652cecc030b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -36,6 +36,7 @@ #include "mmhub/mmhub_9_1_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h" +#include "jpeg_v1_0.h" #define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 @@ -45,9 +46,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); -static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, struct dpg_pause_state *new_state); @@ -68,9 +67,10 @@ static int vcn_v1_0_early_init(void *handle) vcn_v1_0_set_dec_ring_funcs(adev); vcn_v1_0_set_enc_ring_funcs(adev); - vcn_v1_0_set_jpeg_ring_funcs(adev); vcn_v1_0_set_irq_funcs(adev); + jpeg_v1_0_early_init(handle); + return 0; } @@ -101,11 +101,6 @@ static int vcn_v1_0_sw_init(void *handle) return r; } - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq); - if (r) - return r; - r = amdgpu_vcn_sw_init(adev); if (r) return r; @@ -149,17 +144,11 @@ static int vcn_v1_0_sw_init(void *handle) return r; } - ring = &adev->vcn.inst->ring_jpeg; - sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); - if (r) - return r; - adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode; - adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch = - SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); - return 0; + r = jpeg_v1_0_sw_init(handle); + + return r; } /** @@ -178,6 +167,8 @@ static int vcn_v1_0_sw_fini(void *handle) if (r) return r; + jpeg_v1_0_sw_fini(handle); + r = amdgpu_vcn_sw_fini(adev); return r; @@ -207,7 +198,7 @@ static int vcn_v1_0_hw_init(void *handle) goto done; } - ring = &adev->vcn.inst->ring_jpeg; + ring = &adev->jpeg.inst->ring_dec; r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -947,22 +938,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); - ring = &adev->vcn.inst->ring_jpeg; - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | - UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - - /* initialize wptr */ - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - /* copy patch commands to the jpeg ring */ - 
vcn_v1_0_jpeg_ring_set_patch_ring(ring, - (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); + jpeg_v1_0_start(adev, 0); return 0; } @@ -1106,13 +1082,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - /* initialize JPEG wptr */ - ring = &adev->vcn.inst->ring_jpeg; - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - /* copy patch commands to the jpeg ring */ - vcn_v1_0_jpeg_ring_set_patch_ring(ring, - (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); + jpeg_v1_0_start(adev, 1); return 0; } @@ -1316,7 +1286,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code); /* Restore */ - ring = &adev->vcn.inst->ring_jpeg; + ring = &adev->jpeg.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | @@ -1716,389 +1686,6 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } - -/** - * vcn_v1_0_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v1_0_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v1_0_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); -} - -/** - * vcn_v1_0_jpeg_ring_insert_start - insert a start command - * - * @ring: amdgpu_ring pointer - * - * Write a start command to the ring. - */ -static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80010000); -} - -/** - * vcn_v1_0_jpeg_ring_insert_end - insert a end command - * - * @ring: amdgpu_ring pointer - * - * Write a end command to the ring. - */ -static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00010000); -} - -/** - * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command - * - * @ring: amdgpu_ring pointer - * @fence: fence to emit - * - * Write a fence and a trap command to the ring. 
- */ -static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) -{ - struct amdgpu_device *adev = ring->adev; - - WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0xffffffff); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - - /* emit trap */ - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); - amdgpu_ring_write(ring, 0); -} - -/** - * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer - * - * @ring: amdgpu_ring pointer - * @ib: indirect buffer to execute - * - * Write ring commands to execute the indirect buffer. 
- */ -static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) -{ - struct amdgpu_device *adev = ring->adev; - unsigned vmid = AMDGPU_JOB_GET_VMID(job); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, ib->length_dw); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); - amdgpu_ring_write(ring, 0x2); -} - -static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val, - uint32_t mask) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, val); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE3)); - } - amdgpu_ring_write(ring, mask); -} - -static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) -{ - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t data0, data1, mask; - - pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); - - /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; - data1 = lower_32_bits(pd_addr); - mask = 0xffffffff; - vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask); -} - -static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, - uint32_t 
reg, uint32_t val) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - } - amdgpu_ring_write(ring, val); -} - -static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) -{ - int i; - - WARN_ON(ring->wptr % 2 || count % 2); - - for (i = 0; i < count / 2; i++) { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } -} - -static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val) -{ - struct amdgpu_device *adev = ring->adev; - ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - ring->ring[(*ptr)++] = 0; - ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); - } else { - ring->ring[(*ptr)++] = reg_offset; - ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); - } - ring->ring[(*ptr)++] = val; -} - -static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) -{ - struct amdgpu_device *adev = ring->adev; - - uint32_t reg, reg_offset, val, mask, i; - - // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW); - reg_offset = (reg << 2); - val = lower_32_bits(ring->gpu_addr); - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH); - reg_offset = (reg << 2); - val = upper_32_bits(ring->gpu_addr); - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 3rd to 5th: issue MEM_READ commands - for (i = 0; i <= 2; i++) { - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2); - ring->ring[ptr++] = 0; - } - - // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x13; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 7th: program mmUVD_JRBC_RB_REF_DATA - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA); - reg_offset = (reg << 2); - val = 0x1; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x1; - mask = 0x1; - - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0); - ring->ring[ptr++] = 0x01400200; - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0); - ring->ring[ptr++] = val; - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - ring->ring[ptr++] = 0; - ring->ring[ptr++] = 
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3); - } else { - ring->ring[ptr++] = reg_offset; - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3); - } - ring->ring[ptr++] = mask; - - //9th to 21st: insert no-op - for (i = 0; i <= 12; i++) { - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); - ring->ring[ptr++] = 0; - } - - //22nd: reset mmUVD_JRBC_RB_RPTR - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR); - reg_offset = (reg << 2); - val = 0; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x12; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); -} - static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -2123,9 +1710,6 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev, case 120: amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; - case 126: - amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2259,41 +1843,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .nop = PACKET0(0x81ff, 0), - .support_64bit_ptrs = false, - .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, - .extra_dw = 64, - .get_rptr = vcn_v1_0_jpeg_ring_get_rptr, - .get_wptr = vcn_v1_0_jpeg_ring_get_wptr, - .set_wptr = vcn_v1_0_jpeg_ring_set_wptr, - .emit_frame_size = - 6 + 6 + /* hdp invalidate / flush */ - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */ - 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */ - 6, - .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v1_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v1_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v1_0_jpeg_ring_nop, - .insert_start = vcn_v1_0_jpeg_ring_insert_start, - .insert_end = vcn_v1_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs; @@ -2310,12 +1859,6 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev) DRM_INFO("VCN encode is enabled in VM mode\n"); } -static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs; - DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); -} - static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { .set = vcn_v1_0_set_interrupt_state, .process = vcn_v1_0_process_interrupt, -- cgit From 0388aee766376edfd4638f203b6f6260831665f5 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 10:23:14 -0500 Subject: drm/amdgpu: use the JPEG structure for general driver support JPEG1.0 will be functional along with VCN1.0 
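The hunks below switch every consumer of the per-VCN JPEG ring over to the shared JPEG structure. Purely as an illustration (not part of the patch itself), the access pattern changes roughly as follows, assuming the amdgpu_jpeg instance array and harvest mask introduced by this series:

	/* before: the JPEG ring lived inside the VCN instance */
	ring = &adev->vcn.inst[i].ring_jpeg;

	/* after: JPEG keeps its own instance array with a decode ring,
	 * and its own harvest mask is consulted while iterating
	 */
	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		ring = &adev->jpeg.inst[i].ring_dec;
	}
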
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 8 +++----- 3 files changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6614d8a6f4c8..8f2eea92d67c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -169,10 +169,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, } break; case AMDGPU_HW_IP_VCN_JPEG: - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; - rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg; + rings[num_rings++] = &adev->jpeg.inst[j].ring_dec; } break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 3827dcf7e48d..58fb75cb28ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -401,11 +401,11 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, break; case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->uvd.harvest_config & (1 << i)) + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + if (adev->jpeg.harvest_config & (1 << i)) continue; - if (adev->vcn.inst[i].ring_jpeg.sched.ready) + if (adev->jpeg.inst[i].ring_dec.sched.ready) ++num_rings; } ib_start_alignment = 16; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 9d870444d7d6..4715115c8f06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -214,8 +214,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_enc_rings; ++i) amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); - - amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg); } release_firmware(adev->vcn.fw); @@ -308,7 +306,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg)) + if (amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; @@ -316,7 +314,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) adev->vcn.pause_dpg_mode(adev, &new_state); } - fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg); + fence[j] += amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec); fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); fences += fence[j]; } @@ -360,7 +358,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg)) + if (amdgpu_fence_count_emitted(&adev->jpeg.inst[ring->me].ring_dec)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; -- cgit From 8d1b04a6a1dc654d36ab51211fba7af86f0940e6 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 12:44:54 -0500 Subject: drm/amdgpu: add JPEG IP block type From VCN2.0, JPEG2.0 is a separated IP block. 
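With its own entry in amd_ip_block_type, JPEG can be registered and scheduled as an independent IP block. A minimal sketch of how an ASIC init path would hook it up, assuming the jpeg_v2_0_ip_block version structure added later in this series:

	/* in <asic>_set_ip_blocks(), after the VCN block */
	amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
	amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
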
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index dc7eb28f0296..d5bc8be4d70c 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -53,7 +53,8 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_VCE, AMD_IP_BLOCK_TYPE_ACP, AMD_IP_BLOCK_TYPE_VCN, - AMD_IP_BLOCK_TYPE_MES + AMD_IP_BLOCK_TYPE_MES, + AMD_IP_BLOCK_TYPE_JPEG }; enum amd_clockgating_state { -- cgit From 2eb167293f5738c7d6b89ca154b5fddd04774db3 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 13:12:05 -0500 Subject: drm/amdgpu: add JPEG common functions to amdgpu_jpeg They will be used for JPEG2.0 and later. Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 76 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 10 +++++ 2 files changed, 86 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index d9a547d4d3b2..5727f00afc8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -26,9 +26,85 @@ #include "amdgpu.h" #include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" #include "soc15d.h" #include "soc15_common.h" +#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000) + +static void amdgpu_jpeg_idle_work_handler(struct work_struct *work); + +int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) +{ + INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler); + + return 0; +} + +int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) +{ + int i; + + cancel_delayed_work_sync(&adev->jpeg.idle_work); + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec); + } + + return 0; +} + +int amdgpu_jpeg_suspend(struct amdgpu_device *adev) +{ + cancel_delayed_work_sync(&adev->jpeg.idle_work); + + return 0; +} + +int amdgpu_jpeg_resume(struct amdgpu_device *adev) +{ + return 0; +} + +static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, jpeg.idle_work.work); + unsigned int fences = 0; + unsigned int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec); + } + + if (fences == 0) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, + AMD_PG_STATE_GATE); + else + schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); +} + +void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work); + + if (set_clocks) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, + AMD_PG_STATE_UNGATE); +} + +void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); +} + int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 
a8d988c25f45..5e2e06ec13df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -41,8 +41,18 @@ struct amdgpu_jpeg { struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; struct amdgpu_jpeg_reg internal; unsigned harvest_config; + struct delayed_work idle_work; + enum amd_powergating_state cur_state; }; +int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); +int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev); +int amdgpu_jpeg_suspend(struct amdgpu_device *adev); +int amdgpu_jpeg_resume(struct amdgpu_device *adev); + +void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring); + int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); -- cgit From 6ac27241106bc946bd50032f4cd96899c6a6fe69 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 13:26:46 -0500 Subject: drm/amdgpu: add JPEG v2.0 function supports It got separated from VCN2.0 with a new jpeg_v2_0_ip_block Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 809 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h | 42 ++ 3 files changed, 853 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index b6d4b227590a..c48b3bd73dfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -154,7 +154,8 @@ amdgpu-y += \ vcn_v2_0.o \ vcn_v2_5.o \ amdgpu_jpeg.o \ - jpeg_v1_0.o + jpeg_v1_0.o \ + jpeg_v2_0.o # add ATHUB block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c new file mode 100644 index 000000000000..4143ef6905b8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -0,0 +1,809 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" + +#include "vcn/vcn_2_0_0_offset.h" +#include "vcn/vcn_2_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff +#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 +#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a +#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb +#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf +#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 +#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed +#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 +#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 +#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 + +static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v2_0_set_powergating_state(void *handle, + enum amd_powergating_state state); + +/** + * jpeg_v2_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v2_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->jpeg.num_jpeg_inst = 1; + + jpeg_v2_0_set_dec_ring_funcs(adev); + jpeg_v2_0_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v2_0_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v2_0_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); + if (r) + return r; + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + ring = &adev->jpeg.inst->ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v2_0_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v2_0_sw_fini(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v2_0_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v2_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; 
+ + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + + r = amdgpu_ring_test_helper(ring); + if (!r) + DRM_INFO("JPEG decode initialized successfully.\n"); + + return r; +} + +/** + * jpeg_v2_0_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v2_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) + jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + + ring->sched.ready = false; + + return 0; +} + +/** + * jpeg_v2_0_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v2_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = jpeg_v2_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v2_0_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v2_0_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v2_0_hw_init(adev); + + return r; +} + +static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev) +{ + uint32_t data; + int r = 0; + + data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + SOC15_WAIT_ON_RREG(JPEG, 0, + mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); + + if (r) { + DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); + return r; + } + + /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */ + data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); + + return 0; +} + +static int jpeg_v2_0_enable_power_gating(struct amdgpu_device* adev) +{ + uint32_t data; + int r = 0; + + data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)); + data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; + data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); + + data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS, + (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); + + if (r) { + DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); + return r; + } + + return 0; +} + +static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device* adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JPEG_ENC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); +} + 
+static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device* adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JPEG_ENC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); +} + +/** + * jpeg_v2_0_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v2_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + /* disable power gating */ + r = jpeg_v2_0_disable_power_gating(adev); + if (r) + return r; + + /* JPEG disable CGC */ + jpeg_v2_0_disable_clock_gating(adev); + + WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); + + return 0; +} + +/** + * jpeg_v2_0_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v2_0_stop(struct amdgpu_device *adev) +{ + int r; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable JPEG CGC */ + jpeg_v2_0_enable_clock_gating(adev); + + /* enable power gating */ + r = jpeg_v2_0_enable_power_gating(adev); + + return r; +} + +/** + * jpeg_v2_0_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v2_0_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v2_0_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = 
lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +/** + * jpeg_v2_0_dec_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command to the ring. + */ +void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80010000); +} + +/** + * jpeg_v2_0_dec_ring_insert_end - insert a end command + * + * @ring: amdgpu_ring pointer + * + * Write a end command to the ring. + */ +void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00010000); +} + +/** + * jpeg_v2_0_dec_ring_emit_fence - emit an fence & trap command + * + * @ring: amdgpu_ring pointer + * @fence: fence to emit + * + * Write a fence and a trap command to the ring. + */ +void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x3fbc); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x1); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); + amdgpu_ring_write(ring, 0); +} + +/** + * jpeg_v2_0_dec_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @ib: indirect buffer to execute + * + * Write ring commands to execute the indirect buffer. 
+ */ +void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, ib->length_dw); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); + amdgpu_ring_write(ring, 0x2); +} + +void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, val); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE3)); + } + amdgpu_ring_write(ring, mask); +} + +void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + jpeg_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); +} + +void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) +{ + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + 
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + } + amdgpu_ring_write(ring, val); +} + +void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } +} + +static bool jpeg_v2_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); +} + +static int jpeg_v2_0_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; + + SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret); + + return ret; +} + +static int jpeg_v2_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + + if (enable) { + if (jpeg_v2_0_is_idle(handle)) + return -EBUSY; + jpeg_v2_0_enable_clock_gating(adev); + } else { + jpeg_v2_0_disable_clock_gating(adev); + } + + return 0; +} + +static int jpeg_v2_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (state == adev->jpeg.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = jpeg_v2_0_stop(adev); + else + ret = jpeg_v2_0_start(adev); + + if (!ret) + adev->jpeg.cur_state = state; + + return ret; +} + +static int jpeg_v2_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: JPEG TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { + .name = "jpeg_v2_0", + .early_init = jpeg_v2_0_early_init, + .late_init = NULL, + .sw_init = jpeg_v2_0_sw_init, + .sw_fini = jpeg_v2_0_sw_fini, + .hw_init = jpeg_v2_0_hw_init, + .hw_fini = jpeg_v2_0_hw_fini, + .suspend = jpeg_v2_0_suspend, + .resume = jpeg_v2_0_resume, + .is_idle = jpeg_v2_0_is_idle, + .wait_for_idle = jpeg_v2_0_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v2_0_set_clockgating_state, + .set_powergating_state = jpeg_v2_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = jpeg_v2_0_dec_ring_get_rptr, + .get_wptr = jpeg_v2_0_dec_ring_get_wptr, + .set_wptr = jpeg_v2_0_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */ + 18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + 
.emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */ + .emit_ib = jpeg_v2_0_dec_ring_emit_ib, + .emit_fence = jpeg_v2_0_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v2_0_dec_ring_nop, + .insert_start = jpeg_v2_0_dec_ring_insert_start, + .insert_end = jpeg_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs; + DRM_INFO("JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = { + .set = jpeg_v2_0_set_interrupt_state, + .process = jpeg_v2_0_process_interrupt, +}; + +static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->irq.num_types = 1; + adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs; +} + +const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &jpeg_v2_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h new file mode 100644 index 000000000000..15a344ed340f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V2_0_H__ +#define __JPEG_V2_0_H__ + +void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); +void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); +void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags); +void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, uint32_t flags); +void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask); +void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr); +void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count); + +extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block; + +#endif /* __JPEG_V2_0_H__ */ -- cgit From b0f3cd3191cdea33c79a7bece86fe3763825228f Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 13:30:15 -0500 Subject: drm/amdgpu: remove unnecessary JPEG2.0 code from VCN2.0 They are no longer needed, using from JPEG2.0 instead. Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 260 +--------------------------------- 1 file changed, 3 insertions(+), 257 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 38f787a560cb..ded0ab574f4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -74,7 +74,6 @@ static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state); @@ -97,7 +96,6 @@ static int vcn_v2_0_early_init(void *handle) vcn_v2_0_set_dec_ring_funcs(adev); vcn_v2_0_set_enc_ring_funcs(adev); - vcn_v2_0_set_jpeg_ring_funcs(adev); vcn_v2_0_set_irq_funcs(adev); return 0; @@ -132,12 +130,6 @@ static int vcn_v2_0_sw_init(void *handle) return r; } - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq); - if (r) - return r; - r = amdgpu_vcn_sw_init(adev); if (r) return r; @@ -194,19 +186,8 @@ static int vcn_v2_0_sw_init(void *handle) return r; } - ring = &adev->vcn.inst->ring_jpeg; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); - if (r) - return r; - adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode; - adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); - return 0; } @@ -258,11 +239,6 @@ static int vcn_v2_0_hw_init(void *handle) goto done; } - ring = &adev->vcn.inst->ring_jpeg; - r = amdgpu_ring_test_helper(ring); - if (r) - goto done; - done: if (!r) DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", @@ -296,9 +272,6 @@ static int vcn_v2_0_hw_fini(void *handle) ring->sched.ready = false; } - ring = &adev->vcn.inst->ring_jpeg; - ring->sched.ready = false; - return 0; } @@ -393,7 +366,6 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 
0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect) @@ -647,129 +619,6 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev, UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); } -/** - * jpeg_v2_0_start - start JPEG block - * - * @adev: amdgpu_device pointer - * - * Setup and start the JPEG block - */ -static int jpeg_v2_0_start(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg; - uint32_t tmp; - int r = 0; - - /* disable power gating */ - tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp); - - SOC15_WAIT_ON_RREG(VCN, 0, - mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - - if (r) { - DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); - return r; - } - - /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */ - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp); - - /* JPEG disable CGC */ - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JPEG_ENC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - return 0; -} - -/** - * jpeg_v2_0_stop - stop JPEG block - * - * @adev: amdgpu_device pointer - * - * stop the JPEG block - */ -static int jpeg_v2_0_stop(struct amdgpu_device *adev) -{ - uint32_t tmp; - int r = 0; - - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable JPEG CGC */ - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); - - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JPEG_ENC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - 
|JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - /* enable power gating */ - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)); - tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; - tmp |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp); - - tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp); - - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, - (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - - if (r) { - DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); - return r; - } - - return r; -} - /** * vcn_v2_0_enable_clock_gating - enable VCN clock gating * @@ -1052,12 +901,8 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { - r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram); - if (r) - return r; - goto jpeg; - } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram); vcn_v2_0_disable_static_power_gating(adev); @@ -1209,10 +1054,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); -jpeg: - r = jpeg_v2_0_start(adev); - - return r; + return 0; } static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev) @@ -1231,9 +1073,6 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev) tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); - tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); - tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); @@ -1252,10 +1091,6 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev) uint32_t tmp; int r; - r = jpeg_v2_0_stop(adev); - if (r) - return r; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v2_0_stop_dpg_mode(adev); if (r) @@ -1781,56 +1616,6 @@ void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_ amdgpu_ring_write(ring, val); } -/** - * vcn_v2_0_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v2_0_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) - return adev->wb.wb[ring->wptr_offs]; - else - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v2_0_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) { - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); - WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); - } else { - 
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); - } -} - /** * vcn_v2_0_jpeg_ring_insert_start - insert a start command * @@ -2071,9 +1856,6 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; - case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2219,36 +2001,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_0, - .get_rptr = vcn_v2_0_jpeg_ring_get_rptr, - .get_wptr = vcn_v2_0_jpeg_ring_get_wptr, - .set_wptr = vcn_v2_0_jpeg_ring_set_wptr, - .emit_frame_size = - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */ - 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */ - 8 + 16, - .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v2_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v2_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v2_0_jpeg_ring_nop, - .insert_start = vcn_v2_0_jpeg_ring_insert_start, - .insert_end = vcn_v2_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs; @@ -2265,12 +2017,6 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev) DRM_INFO("VCN encode is enabled in VM mode\n"); } -static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs; - DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); -} - static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = { .set = vcn_v2_0_set_interrupt_state, .process = vcn_v2_0_process_interrupt, -- cgit From 18e6d4142bbb361681f8d228beb04305b393f7e9 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 13:17:52 -0500 Subject: drm/amdgpu: add JPEG PG and CG interface From JPEG2.0, it will use its own PG/CG Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index d5bc8be4d70c..d655a76bedc6 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -100,6 +100,7 @@ enum amd_powergating_state { #define AMD_CG_SUPPORT_IH_CG (1 << 27) #define AMD_CG_SUPPORT_ATHUB_LS (1 << 28) #define AMD_CG_SUPPORT_ATHUB_MGCG (1 << 29) +#define AMD_CG_SUPPORT_JPEG_MGCG (1 << 30) /* PG flags */ #define AMD_PG_SUPPORT_GFX_PG (1 << 0) #define AMD_PG_SUPPORT_GFX_SMG (1 << 1) @@ -118,6 +119,7 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_VCN (1 << 
14) #define AMD_PG_SUPPORT_VCN_DPG (1 << 15) #define AMD_PG_SUPPORT_ATHUB (1 << 16) +#define AMD_PG_SUPPORT_JPEG (1 << 17) enum PP_FEATURE_MASK { PP_SCLK_DPM_MASK = 0x1, -- cgit From 099d66e43f049f8461fd330620e3a668098c7e03 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 11 Nov 2019 15:09:25 -0500 Subject: drm/amdgpu: add PG and CG for JPEG2.0 And enable them for Navi1x and Renoir Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 62 ++++++++++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/nv.c | 8 ++++- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++ 3 files changed, 45 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 4143ef6905b8..3869730b2331 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -227,16 +227,18 @@ static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev) uint32_t data; int r = 0; - data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); - - SOC15_WAIT_ON_RREG(JPEG, 0, - mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - - if (r) { - DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); - return r; + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + SOC15_WAIT_ON_RREG(JPEG, 0, + mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); + + if (r) { + DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); + return r; + } } /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */ @@ -248,24 +250,26 @@ static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev) static int jpeg_v2_0_enable_power_gating(struct amdgpu_device* adev) { - uint32_t data; - int r = 0; + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + uint32_t data; + int r = 0; - data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)); - data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; - data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; - WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); + data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)); + data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; + data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); - data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); - SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS, - (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); + SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS, + (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - if (r) { - DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); - return r; + if (r) { + DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); + return r; + } } return 0; @@ -276,7 +280,10 @@ static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device* adev) uint32_t data; data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); - data |= 1 << 
JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; @@ -296,7 +303,10 @@ static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device* adev) uint32_t data; data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); - data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index af68f9815f28..7c1068efe651 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -636,10 +636,12 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0x1; break; @@ -656,9 +658,11 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 20; break; @@ -677,9 +681,11 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | - AMD_CG_SUPPORT_VCN_MGCG; + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0xa; break; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8e1640bc07af..0c36cb784009 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1229,12 +1229,14 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_DF_MGCG; adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x91; break; -- cgit From eedd4f024235e2a847cf7aea5be4695a746f8701 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 13:54:33 -0500 Subject: drm/amd/powerplay: add JPEG Powerplay interface It will be used for different SMU specific to HW Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 999445c5c010..cdd46cdaffb8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -282,6 +282,7 @@ struct smu_power_gate { bool uvd_gated; bool vce_gated; bool vcn_gated; + bool jpeg_gated; 
}; struct smu_power_context { @@ -435,6 +436,7 @@ struct pptable_funcs { int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable); int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable); + int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable); int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size); int (*pre_display_config_changed)(struct smu_context *smu); @@ -489,6 +491,7 @@ struct pptable_funcs { int (*check_fw_version)(struct smu_context *smu); int (*powergate_sdma)(struct smu_context *smu, bool gate); int (*powergate_vcn)(struct smu_context *smu, bool gate); + int (*powergate_jpeg)(struct smu_context *smu, bool gate); int (*set_gfx_cgpg)(struct smu_context *smu, bool enable); int (*write_pptable)(struct smu_context *smu); int (*set_min_dcef_deep_sleep)(struct smu_context *smu); -- cgit From 43717ff656ee269848d9652f4a240e85298f53f8 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 14:11:01 -0500 Subject: drm/amd/powerplay: add JPEG power control for Navi1x By separating the JPEG power feature, and using its own PowerUp and PowerDown messages v2: remove PowerUpJpeg message argument Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 32 ++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 14be350a6127..95eeb44d0004 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -384,8 +384,10 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT); if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT) - | FEATURE_MASK(FEATURE_JPEG_PG_BIT); + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT); + + if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT); /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */ if (is_asic_secure(smu)) { @@ -665,6 +667,31 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) return ret; } +static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg); + if (ret) + return ret; + } + power_gate->jpeg_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg); + if (ret) + return ret; + } + power_gate->jpeg_gated = true; + } + + return ret; +} + static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) @@ -1995,6 +2022,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_allowed_feature_mask = navi10_get_allowed_feature_mask, .set_default_dpm_table = navi10_set_default_dpm_table, .dpm_set_uvd_enable = navi10_dpm_set_uvd_enable, + .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable, .get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table, .print_clk_levels = 
navi10_print_clk_levels, .force_clk_levels = navi10_force_clk_levels, -- cgit From 27f7ff327dbf230098bb05dda9217cc3e5bef6a7 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 14:22:06 -0500 Subject: drm/amd/powerplay: add Powergate JPEG for Renoir Similar to SDMA, VCN etc. v2: add argument to both PowerUpJpeg and PowerDownJpeg messages Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 ++ drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 2 ++ drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 1 + drivers/gpu/drm/amd/powerplay/smu_internal.h | 2 ++ drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 11 +++++++++++ 5 files changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 69243a858dd5..211934521d37 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1229,6 +1229,7 @@ static int smu_hw_init(void *handle) if (adev->flags & AMD_IS_APU) { smu_powergate_sdma(&adev->smu, false); smu_powergate_vcn(&adev->smu, false); + smu_powergate_jpeg(&adev->smu, false); smu_set_gfx_cgpg(&adev->smu, true); } @@ -1287,6 +1288,7 @@ static int smu_hw_fini(void *handle) if (adev->flags & AMD_IS_APU) { smu_powergate_sdma(&adev->smu, true); smu_powergate_vcn(&adev->smu, true); + smu_powergate_jpeg(&adev->smu, true); } ret = smu_stop_thermal_control(smu); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index 9b9f5df0911c..1745e0146fba 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -58,6 +58,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate); int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate); +int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate); + int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable); uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 04daf7e9fe05..492a201554e8 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -697,6 +697,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .check_fw_version = smu_v12_0_check_fw_version, .powergate_sdma = smu_v12_0_powergate_sdma, .powergate_vcn = smu_v12_0_powergate_vcn, + .powergate_jpeg = smu_v12_0_powergate_jpeg, .send_smc_msg = smu_v12_0_send_msg, .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, .read_smc_arg = smu_v12_0_read_arg, diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 8bcda7871309..70c4d66721cd 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -42,6 +42,8 @@ ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0) #define smu_powergate_vcn(smu, gate) \ ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0) +#define smu_powergate_jpeg(smu, gate) \ + ((smu)->ppt_funcs->powergate_jpeg ? (smu)->ppt_funcs->powergate_jpeg((smu), (gate)) : 0) #define smu_get_vbios_bootup_values(smu) \ ((smu)->ppt_funcs->get_vbios_bootup_values ? 
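Concretely, smu_dpm_set_power_gate() gains a dedicated AMD_IP_BLOCK_TYPE_JPEG case instead of reusing the UVD/VCN path, and smu_internal.h gets a smu_dpm_set_jpeg_enable() wrapper that falls back to a no-op when an ASIC's ppt_funcs does not implement the hook. A minimal sketch of the resulting call chain, for illustration only: example_toggle_jpeg() is a made-up caller, while the other names are the ones introduced by this series.

    /* hypothetical caller: gate/ungate the JPEG block through the SW SMU */
    static int example_toggle_jpeg(struct amdgpu_device *adev, bool enable)
    {
            if (!is_support_sw_smu(adev))
                    return 0;

            /* routed via the new AMD_IP_BLOCK_TYPE_JPEG case to
             * ppt_funcs->dpm_set_jpeg_enable(), which sends
             * SMU_MSG_PowerUpJpeg / SMU_MSG_PowerDownJpeg on Navi1x/Renoir
             */
            return smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_JPEG,
                                          enable);
    }
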
(smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 139dd737eaa5..18b24f954380 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -203,6 +203,17 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn); } +int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) +{ + if (!(smu->adev->flags & AMD_IS_APU)) + return 0; + + if (gate) + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + else + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); +} + int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) { if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) -- cgit From a986e15127131b3cbf2fc341505805679c42df7d Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 14:33:10 -0500 Subject: drm/amd/powerplay: add JPEG power control for Renoir By using its own JPEG PowerUp and PowerDown messages v2: add argument to PowerDownJpeg message Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 492a201554e8..784903a313b7 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -301,6 +301,31 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable) return ret; } +static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); + if (ret) + return ret; + } + power_gate->jpeg_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + if (ret) + return ret; + } + power_gate->jpeg_gated = true; + } + + return ret; +} + static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) { int ret = 0, i = 0; @@ -683,6 +708,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .print_clk_levels = renoir_print_clk_levels, .get_current_power_state = renoir_get_current_power_state, .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable, + .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable, .force_dpm_limit_value = renoir_force_dpm_limit_value, .unforce_dpm_levels = renoir_unforce_dpm_levels, .get_workload_type = renoir_get_workload_type, -- cgit From 0db2ab99c9fbbd29ea54505027ce16a91da9efff Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 14:38:08 -0500 Subject: drm/amd/powerplay: set JPEG to SMU dpm By using its own IP block type. 
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 3 +++ drivers/gpu/drm/amd/powerplay/smu_internal.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 211934521d37..a1453157eefe 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -415,6 +415,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, case AMD_IP_BLOCK_TYPE_SDMA: ret = smu_powergate_sdma(smu, gate); break; + case AMD_IP_BLOCK_TYPE_JPEG: + ret = smu_dpm_set_jpeg_enable(smu, gate); + break; default: break; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 70c4d66721cd..b2d81d3490cd 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -172,6 +172,8 @@ ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) #define smu_dpm_set_vce_enable(smu, enable) \ ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) +#define smu_dpm_set_jpeg_enable(smu, enable) \ + ((smu)->ppt_funcs->dpm_set_jpeg_enable ? (smu)->ppt_funcs->dpm_set_jpeg_enable((smu), (enable)) : 0) #define smu_set_watermarks_table(smu, tab, clock_ranges) \ ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) -- cgit From 474b6d296f237da1b05d5ef8749c3d7ab9f87682 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 12 Nov 2019 11:57:36 -0500 Subject: drm/amdgpu: enable JPEG2.0 dpm By using its own enabling function Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 12 ++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | 1 + drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 10 +++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index f205f56e3358..0c7324bc31a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -2718,6 +2718,18 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev) } +void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) +{ + int ret = 0; + + if (is_support_sw_smu(adev)) { + ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_JPEG, enable); + if (ret) + DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. \n", + enable ? 
"true" : "false", ret); + } +} + int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev) { int ret = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index ef31448ee8d8..3da1da277805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h @@ -41,5 +41,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_thermal_work_handler(struct work_struct *work); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); +void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 3869730b2331..a78292d84854 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -333,6 +333,9 @@ static int jpeg_v2_0_start(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; int r; + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, true); + /* disable power gating */ r = jpeg_v2_0_disable_power_gating(adev); if (r) @@ -388,8 +391,13 @@ static int jpeg_v2_0_stop(struct amdgpu_device *adev) /* enable power gating */ r = jpeg_v2_0_enable_power_gating(adev); + if (r) + return r; - return r; + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, false); + + return 0; } /** -- cgit From 52f2e779ad86daf6eb39f02eaab94b7326a546cb Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 15:00:58 -0500 Subject: drm/amdgpu: add driver support for JPEG2.0 and above By using JPEG IP block type Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0ad61febbb5f..cdd8ddab8f78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1961,6 +1961,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* enable clockgating to save power */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, @@ -1991,6 +1992,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_powergating_state) { /* enable powergating to save power */ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 58fb75cb28ea..39e37a53cb90 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -400,7 +400,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, ib_size_alignment = 1; break; 
case AMDGPU_HW_IP_VCN_JPEG: - type = AMD_IP_BLOCK_TYPE_VCN; + type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? + AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { if (adev->jpeg.harvest_config & (1 << i)) continue; @@ -521,9 +523,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_VCN_DEC: case AMDGPU_HW_IP_VCN_ENC: - case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; break; + case AMDGPU_HW_IP_VCN_JPEG: + type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? + AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + break; default: return -EINVAL; } -- cgit From 5be45a26c9fbb019814c611932fee391d1cfb364 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 8 Nov 2019 15:01:42 -0500 Subject: drm/amdgpu: enable JPEG2.0 for Navi1x and Renoir By adding JPEG IP block to the family Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 3 +++ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 7c1068efe651..d2989e9484bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -52,6 +52,7 @@ #include "gfx_v10_0.h" #include "sdma_v5_0.h" #include "vcn_v2_0.h" +#include "jpeg_v2_0.h" #include "dce_virtual.h" #include "mes_v10_1.h" #include "mxgpu_nv.h" @@ -456,6 +457,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); if (adev->enable_mes) amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); break; @@ -479,6 +481,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0c36cb784009..f3977abbd1e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -67,6 +67,7 @@ #include "vce_v4_0.h" #include "vcn_v1_0.h" #include "vcn_v2_0.h" +#include "jpeg_v2_0.h" #include "vcn_v2_5.h" #include "dce_virtual.h" #include "mxgpu_ai.h" @@ -821,6 +822,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; default: return -EINVAL; -- cgit From 14f43e8f88c57bbeaed3d05f13efab733f5e2338 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 11 Nov 2019 09:56:32 -0500 Subject: drm/amdgpu: move JPEG2.5 out from VCN2.5 And clean up the duplicated stuff Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 3 + drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 105 ----- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 5 - drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 641 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h | 29 ++ drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 236 ------------ 
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h | 13 - drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 246 +----------- 9 files changed, 679 insertions(+), 602 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index c48b3bd73dfe..7ae3b22c5628 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -155,7 +155,8 @@ amdgpu-y += \ vcn_v2_5.o \ amdgpu_jpeg.o \ jpeg_v1_0.o \ - jpeg_v2_0.o + jpeg_v2_0.o \ + jpeg_v2_5.o # add ATHUB block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 5e2e06ec13df..5131a0a1bc8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -26,6 +26,9 @@ #define AMDGPU_MAX_JPEG_INSTANCES 2 +#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) +#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) + struct amdgpu_jpeg_reg{ unsigned jpeg_pitch; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4715115c8f06..428cfd58b37d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -706,108 +706,3 @@ error: amdgpu_bo_unref(&bo); return r; } - -int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t tmp = 0; - unsigned i; - int r; - - WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); - r = amdgpu_ring_alloc(ring, 3); - if (r) - return r; - - amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0)); - amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_commit(ring); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); - if (tmp == 0xDEADBEEF) - break; - udelay(1); - } - - if (i >= adev->usec_timeout) - r = -ETIMEDOUT; - - return r; -} - -static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_job *job; - struct amdgpu_ib *ib; - struct dma_fence *f = NULL; - const unsigned ib_size_dw = 16; - int i, r; - - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); - if (r) - return r; - - ib = &job->ibs[0]; - - ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); - ib->ptr[1] = 0xDEADBEEF; - for (i = 2; i < 16; i += 2) { - ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); - ib->ptr[i+1] = 0; - } - ib->length_dw = 16; - - r = amdgpu_job_submit_direct(job, ring, &f); - if (r) - goto err; - - if (fence) - *fence = dma_fence_get(f); - dma_fence_put(f); - - return 0; - -err: - amdgpu_job_free(job); - return r; -} - -int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t tmp = 0; - unsigned i; - struct dma_fence *fence = NULL; - long r = 0; - - r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence); - if (r) - goto error; - - r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - r = -ETIMEDOUT; - goto error; - } else if (r < 0) { - goto error; - } else { - r = 0; - } - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); - if (tmp == 0xDEADBEEF) - break; - udelay(1); - } - - if (i >= adev->usec_timeout) - r = -ETIMEDOUT; - - dma_fence_put(fence); -error: - return r; -} diff 
--git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index dface275c81a..402a5046b985 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -158,7 +158,6 @@ struct amdgpu_vcn_reg{ unsigned ib_size; unsigned gp_scratch8; unsigned scratch9; - unsigned jpeg_pitch; }; struct amdgpu_vcn_inst { @@ -168,7 +167,6 @@ struct amdgpu_vcn_inst { void *saved_bo; struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; - struct amdgpu_ring ring_jpeg; struct amdgpu_irq_src irq; struct amdgpu_vcn_reg external; }; @@ -209,7 +207,4 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout); -int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring); -int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c new file mode 100644 index 000000000000..2c58939e6ad0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -0,0 +1,641 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "soc15.h" +#include "soc15d.h" +#include "jpeg_v2_0.h" + +#include "vcn/vcn_2_5_offset.h" +#include "vcn/vcn_2_5_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +#define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2 + +static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v2_5_set_powergating_state(void *handle, + enum amd_powergating_state state); + +static int amdgpu_ih_clientid_jpeg[] = { + SOC15_IH_CLIENTID_VCN, + SOC15_IH_CLIENTID_VCN1 +}; + +/** + * jpeg_v2_5_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v2_5_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->asic_type == CHIP_ARCTURUS) { + u32 harvest; + int i; + + adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS; + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING); + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + adev->jpeg.harvest_config |= 1 << i; + } + + if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 | + AMDGPU_JPEG_HARVEST_JPEG1)) + return -ENOENT; + } else + adev->jpeg.num_jpeg_inst = 1; + + jpeg_v2_5_set_dec_ring_funcs(adev); + jpeg_v2_5_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v2_5_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v2_5_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + int i, r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], + VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq); + if (r) + return r; + } + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; + sprintf(ring->name, "jpeg_dec_%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH); + } + + return 0; +} + +/** + * jpeg_v2_5_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v2_5_sw_fini(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v2_5_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v2_5_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int i, r; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + 
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + + DRM_INFO("JPEG decode initialized successfully.\n"); + + return 0; +} + +/** + * jpeg_v2_5_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v2_5_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) + jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + ring->sched.ready = false; + } + + return 0; +} + +/** + * jpeg_v2_5_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v2_5_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = jpeg_v2_5_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v2_5_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v2_5_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v2_5_hw_init(adev); + + return r; +} + +static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE); + data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JPEG_ENC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data); + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL); + data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK + | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK + | JPEG_CGC_CTRL__JMCIF_MODE_MASK + | JPEG_CGC_CTRL__JRBBM_MODE_MASK); + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data); +} + +static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device* adev, int inst) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JPEG_ENC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data); +} + +/** + * jpeg_v2_5_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v2_5_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0, + 
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + /* JPEG disable CGC */ + jpeg_v2_5_disable_clock_gating(adev, i); + + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR); + } + + return 0; +} + +/** + * jpeg_v2_5_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v2_5_stop(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + jpeg_v2_5_enable_clock_gating(adev, i); + + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + } + + return 0; +} + +/** + * jpeg_v2_5_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v2_5_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v2_5_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +static bool jpeg_v2_5_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 1; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK)); + } 
+ + return ret; +} + +static int jpeg_v2_5_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 0; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret); + if (ret) + return ret; + } + + return ret; +} + +static int jpeg_v2_5_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + if (enable) { + if (jpeg_v2_5_is_idle(handle)) + return -EBUSY; + jpeg_v2_5_enable_clock_gating(adev, i); + } else { + jpeg_v2_5_disable_clock_gating(adev, i); + } + } + + return 0; +} + +static int jpeg_v2_5_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if(state == adev->jpeg.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = jpeg_v2_5_stop(adev); + else + ret = jpeg_v2_5_start(adev); + + if(!ret) + adev->jpeg.cur_state = state; + + return ret; +} + +static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t ip_instance; + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_VCN: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + + DRM_DEBUG("IH: JPEG TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { + .name = "jpeg_v2_5", + .early_init = jpeg_v2_5_early_init, + .late_init = NULL, + .sw_init = jpeg_v2_5_sw_init, + .sw_fini = jpeg_v2_5_sw_fini, + .hw_init = jpeg_v2_5_hw_init, + .hw_fini = jpeg_v2_5_hw_fini, + .suspend = jpeg_v2_5_suspend, + .resume = jpeg_v2_5_resume, + .is_idle = jpeg_v2_5_is_idle, + .wait_for_idle = jpeg_v2_5_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v2_5_set_clockgating_state, + .set_powergating_state = jpeg_v2_5_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_1, + .get_rptr = jpeg_v2_5_dec_ring_get_rptr, + .get_wptr = jpeg_v2_5_dec_ring_get_wptr, + .set_wptr = jpeg_v2_5_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */ + 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */ + .emit_ib = jpeg_v2_0_dec_ring_emit_ib, + .emit_fence = jpeg_v2_0_dec_ring_emit_fence, + .emit_vm_flush = 
jpeg_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v2_0_dec_ring_nop, + .insert_start = jpeg_v2_0_dec_ring_insert_start, + .insert_end = jpeg_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs; + adev->jpeg.inst[i].ring_dec.me = i; + DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i); + } +} + +static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { + .set = jpeg_v2_5_set_interrupt_state, + .process = jpeg_v2_5_process_interrupt, +}; + +static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + adev->jpeg.inst[i].irq.num_types = 1; + adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; + } +} + +const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 2, + .minor = 5, + .rev = 0, + .funcs = &jpeg_v2_5_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h new file mode 100644 index 000000000000..2b4087c02620 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V2_5_H__ +#define __JPEG_V2_5_H__ + +extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block; + +#endif /* __JPEG_V2_5_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index ded0ab574f4e..5649190cb629 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -47,26 +47,6 @@ #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2 -#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff -#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 -#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a -#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb -#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf -#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 -#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed -#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 -#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 -#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 -#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f - -#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 - #define mmUVD_RBC_XX_IB_REG_CHECK 0x026b #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 #define mmUVD_REG_XX_MASK 0x026c @@ -1616,222 +1596,6 @@ void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_ amdgpu_ring_write(ring, val); } -/** - * vcn_v2_0_jpeg_ring_insert_start - insert a start command - * - * @ring: amdgpu_ring pointer - * - * Write a start command to the ring. - */ -void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) -{ - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80010000); -} - -/** - * vcn_v2_0_jpeg_ring_insert_end - insert a end command - * - * @ring: amdgpu_ring pointer - * - * Write a end command to the ring. - */ -void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) -{ - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00010000); -} - -/** - * vcn_v2_0_jpeg_ring_emit_fence - emit an fence & trap command - * - * @ring: amdgpu_ring pointer - * @fence: fence to emit - * - * Write a fence and a trap command to the ring. 
- */ -void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) -{ - WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); - amdgpu_ring_write(ring, 0); -} - -/** - * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer - * - * @ring: amdgpu_ring pointer - * @ib: indirect buffer to execute - * - * Write ring commands to execute the indirect buffer. - */ -void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) -{ - unsigned vmid = AMDGPU_JOB_GET_VMID(job); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, ib->length_dw); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); - amdgpu_ring_write(ring, 0x2); -} - -void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - 
uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, val); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE3)); - } - amdgpu_ring_write(ring, mask); -} - -void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) -{ - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t data0, data1, mask; - - pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); - - /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; - data1 = lower_32_bits(pd_addr); - mask = 0xffffffff; - vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask); -} - -void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) -{ - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - } - amdgpu_ring_write(ring, val); -} - -void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) -{ - int i; - - WARN_ON(ring->wptr % 2 || count % 2); - - for (i = 0; i < count / 2; i++) { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } -} - static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h index 8467292f32e5..ef749b02ded9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h @@ -49,19 +49,6 @@ extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr); extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); -extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring); -extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring); -extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags); -extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_ib *ib, uint32_t flags); -extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask); -extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr); -extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); -extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count); - extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block; #endif /* __VCN_V2_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 03083e5f731a..451dc814d845 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -47,13 +47,10 @@ #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c -#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f - -#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 +#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_5_set_powergating_state(void *handle, enum amd_powergating_state state); @@ -95,7 +92,6 @@ static int vcn_v2_5_early_init(void *handle) vcn_v2_5_set_dec_ring_funcs(adev); vcn_v2_5_set_enc_ring_funcs(adev); - vcn_v2_5_set_jpeg_ring_funcs(adev); vcn_v2_5_set_irq_funcs(adev); return 0; @@ -130,12 +126,6 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; } - - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq); - if (r) - return r; } r = amdgpu_vcn_sw_init(adev); @@ -184,9 +174,6 @@ static int vcn_v2_5_sw_init(void *handle) adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP); - adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH); - ring = &adev->vcn.inst[j].ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j; @@ -204,14 +191,6 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; } - - ring = &adev->vcn.inst[j].ring_jpeg; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j; - sprintf(ring->name, "vcn_jpeg_%d", j); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); - if (r) - return r; } return 0; @@ -269,12 +248,8 @@ static int vcn_v2_5_hw_init(void *handle) if (r) goto done; } - - ring = &adev->vcn.inst[j].ring_jpeg; - r = amdgpu_ring_test_helper(ring); - if (r) - goto done; } + done: if (!r) DRM_INFO("VCN decode and encode initialized successfully.\n"); @@ -309,9 +284,6 @@ static int vcn_v2_5_hw_fini(void *handle) ring = &adev->vcn.inst[i].ring_enc[i]; ring->sched.ready = false; } - - ring = &adev->vcn.inst[i].ring_jpeg; - ring->sched.ready = false; } return 0; @@ -592,115 +564,6 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) } } -/** - * jpeg_v2_5_start - start JPEG block - * - * @adev: amdgpu_device pointer - * - * Setup and start the JPEG block - */ -static int jpeg_v2_5_start(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring; - uint32_t tmp; - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - ring = &adev->vcn.inst[i].ring_jpeg; - /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - - /* JPEG disable CGC */ - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); - - tmp = RREG32_SOC15(VCN, i, 
mmJPEG_CGC_GATE); - tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); - tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK - | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK - | JPEG_CGC_CTRL__JMCIF_MODE_MASK - | JPEG_CGC_CTRL__JRBBM_MODE_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); - - /* MJPEG global tiling registers */ - WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); - - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR); - } - - return 0; -} - -/** - * jpeg_v2_5_stop - stop JPEG block - * - * @adev: amdgpu_device pointer - * - * stop the JPEG block - */ -static int jpeg_v2_5_stop(struct amdgpu_device *adev) -{ - uint32_t tmp; - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE); - tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - |JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); - - /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), - UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - } - - return 0; -} - static int vcn_v2_5_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; @@ -874,19 +737,14 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4); } - r = jpeg_v2_5_start(adev); - return r; + return 0; } static int vcn_v2_5_stop(struct amdgpu_device *adev) { uint32_t tmp; - int i, r; - - r = jpeg_v2_5_stop(adev); - if (r) - return r; + int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -1125,86 +983,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -/** - * vcn_v2_5_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v2_5_jpeg_ring_get_wptr 
- get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) - return adev->wb.wb[ring->wptr_offs]; - else - return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v2_5_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) { - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); - WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); - } else { - WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); - } -} - -static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_1, - .get_rptr = vcn_v2_5_jpeg_ring_get_rptr, - .get_wptr = vcn_v2_5_jpeg_ring_get_wptr, - .set_wptr = vcn_v2_5_jpeg_ring_set_wptr, - .emit_frame_size = - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */ - 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */ - 8 + 16, - .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v2_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v2_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v2_0_jpeg_ring_nop, - .insert_start = vcn_v2_0_jpeg_ring_insert_start, - .insert_end = vcn_v2_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) { int i; @@ -1233,19 +1011,6 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) } } -static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; - adev->vcn.inst[i].ring_jpeg.me = i; - DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i); - } -} - static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1352,9 +1117,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; - case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); -- cgit From e89e2237e89b7c519bfee67242775206611cd5af Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 11 Nov 2019 10:27:03 -0500 Subject: drm/amdgpu: enable Arcturus CG for VCN and JPEG blocks Arcturus VCN and JPEG only got CG support, and no PG support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +++- 1 file changed, 3 insertions(+), 1 
deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f3977abbd1e2..b404b7a6e593 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1210,7 +1210,9 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_LS | - AMD_CG_SUPPORT_IH_CG; + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x32; break; -- cgit From 8c74e590497c5c593a917fd6bf33ab548d886952 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 11 Nov 2019 10:33:57 -0500 Subject: drm/amdgpu: enable Arcturus JPEG2.5 block It also doen't care about FW loading type, so enabling it directly. Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index b404b7a6e593..689ffa6ede57 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -69,6 +69,7 @@ #include "vcn_v2_0.h" #include "jpeg_v2_0.h" #include "vcn_v2_5.h" +#include "jpeg_v2_5.h" #include "dce_virtual.h" #include "mxgpu_ai.h" #include "amdgpu_smu.h" @@ -804,6 +805,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); break; case CHIP_RENOIR: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); -- cgit From c39f062e881dcc6ab4c1c1c5835dc774be1bcfd6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 11 Nov 2019 17:15:02 +0800 Subject: drm/amd/powerplay: avoid DPM reenable process on Navi1x ASICs V2 Otherwise, without RLC reinitialization, the DPM reenablement will fail. That affects the custom pptable uploading. V2: setting/clearing uploading_custom_pp_table in smu_sys_set_pp_table() Reported-by: Matt Coffin Signed-off-by: Evan Quan Tested-by: Matt Coffin Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 31 ++++++++++++++++++++++---- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 + 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index a1453157eefe..223c5a794561 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -594,10 +594,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) smu_table->power_play_table = smu_table->hardcode_pptable; smu_table->power_play_table_size = size; + /* + * Special hw_fini action(for Navi1x, the DPMs disablement will be + * skipped) may be needed for custom pptable uploading. + */ + smu->uploading_custom_pp_table = true; + ret = smu_reset(smu); if (ret) pr_info("smu reset failed, ret = %d\n", ret); + smu->uploading_custom_pp_table = false; + failed: mutex_unlock(&smu->mutex); return ret; @@ -1300,10 +1308,25 @@ static int smu_hw_fini(void *handle) return ret; } - ret = smu_stop_dpms(smu); - if (ret) { - pr_warn("Fail to stop Dpms!\n"); - return ret; + /* + * For custom pptable uploading, skip the DPM features + * disable process on Navi1x ASICs. 
+ * - As the gfx related features are under control of + * RLC on those ASICs. RLC reinitialization will be + * needed to reenable them. That will cost much more + * efforts. + * + * - SMU firmware can handle the DPM reenablement + * properly. + */ + if (!smu->uploading_custom_pp_table || + !((adev->asic_type >= CHIP_NAVI10) && + (adev->asic_type <= CHIP_NAVI12))) { + ret = smu_stop_dpms(smu); + if (ret) { + pr_warn("Fail to stop Dpms!\n"); + return ret; + } } kfree(table_context->driver_pptable); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index cdd46cdaffb8..5bac7efcd6ee 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -391,6 +391,7 @@ struct smu_context uint32_t smc_if_version; + bool uploading_custom_pp_table; }; struct i2c_adapter; -- cgit From 0eeaa899263cf1f55ab7df46575015e6ba08dbb0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 12 Nov 2019 14:18:54 +0800 Subject: drm/amd/powerplay: issue BTC on Navi during SMU setup RunBTC is added for Navi ASIC on hardware setup. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 95eeb44d0004..b759226ec67a 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -2007,6 +2007,17 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL return ret; } +static int navi10_run_btc(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc); + if (ret) + pr_err("RunBtc failed!\n"); + + return ret; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -2099,6 +2110,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_default_od_settings = navi10_set_default_od_settings, .od_edit_dpm_table = navi10_od_edit_dpm_table, .get_pptable_power_limit = navi10_get_pptable_power_limit, + .run_btc = navi10_run_btc, }; void navi10_set_ppt_funcs(struct smu_context *smu) -- cgit From 93a09aa494e959219f925f3847ef8c836143c5fd Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 14 Nov 2019 15:30:39 +0800 Subject: drm/amd/powerplay: issue no PPSMC_MSG_GetCurrPkgPwr on unsupported ASICs Otherwise, the error message prompted will confuse user. 
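For reference, the guard this patch adds to smu7_get_gpu_power() boils down to an ASIC check in front of the direct SMC query. The real change open-codes the condition (see the diff below); the helper here is only a hedged sketch with an invented name:

static bool smu7_can_query_pkg_power(struct amdgpu_device *adev)
{
	/* PPSMC_MSG_GetCurrPkgPwr is not implemented on these ASICs, so skip
	 * the direct query and let the caller fall back to the PmStatusLog
	 * sampling path instead of logging a confusing SMC error.
	 */
	return adev->asic_type != CHIP_HAWAII &&
	       adev->asic_type != CHIP_BONAIRE &&
	       adev->asic_type != CHIP_FIJI &&
	       adev->asic_type != CHIP_TONGA;
}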
Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 621cf1ade981..f754fbd70f68 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3477,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) { + struct amdgpu_device *adev = hwmgr->adev; int i; u32 tmp = 0; if (!query) return -EINVAL; - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); - tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - *query = tmp; + /* + * PPSMC_MSG_GetCurrPkgPwr is not supported on: + * - Hawaii + * - Bonaire + * - Fiji + * - Tonga + */ + if ((adev->asic_type != CHIP_HAWAII) && + (adev->asic_type != CHIP_BONAIRE) && + (adev->asic_type != CHIP_FIJI) && + (adev->asic_type != CHIP_TONGA)) { + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); + tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *query = tmp; - if (tmp != 0) - return 0; + if (tmp != 0) + return 0; + } smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, -- cgit From 09ba2e7d68f8c24ef3c7469d6632818c535bcb2c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 14 Nov 2019 16:58:31 +0800 Subject: drm/amd/powerplay: correct fine grained dpm force level setting For fine grained dpm, there is only two levels supported. However to reflect correctly the current clock frequency, there is an intermediate level faked. Thus on forcing level setting, we need to treat level 2 correctly as level 1. Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index b759226ec67a..bc44bc4e0b9d 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -886,6 +886,12 @@ static int navi10_force_clk_levels(struct smu_context *smu, case SMU_UCLK: case SMU_DCEFCLK: case SMU_FCLK: + /* There is only 2 levels for fine grained DPM */ + if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { + soft_max_level = (soft_max_level >= 1 ? 1 : 0); + soft_min_level = (soft_min_level >= 1 ? 
1 : 0); + } + ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) return size; -- cgit From d191bd678153307573d615bb42da4fcca19fe477 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 14 Nov 2019 11:20:25 +0800 Subject: drm/amdkfd: remove set but not used variable 'top_dev' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/amdkfd/kfd_iommu.c: In function kfd_iommu_device_init: drivers/gpu/drm/amd/amdkfd/kfd_iommu.c:65:30: warning: variable top_dev set but not used [-Wunused-but-set-variable] Reported-by: Hulk Robot Fixes: 1ae99eab34f9 ("drm/amdkfd: Initialize HSA_CAP_ATS_PRESENT capability in topology codes") Signed-off-by: zhengbin Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 193e2835bd4d..8d871514671e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -62,9 +62,6 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) struct amd_iommu_device_info iommu_info; unsigned int pasid_limit; int err; - struct kfd_topology_device *top_dev; - - top_dev = kfd_topology_device_by_id(kfd->id); if (!kfd->device_info->needs_iommu_device) return 0; -- cgit From 4effa8dbc11780de2a9f8c756d1c133fb862c697 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 15 Nov 2019 17:10:34 -0500 Subject: drm/amdgpu/vcn2.5: fix the enc loop with hw fini Signed-off-by: Leo Liu Reviewed-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 451dc814d845..42d6b9f0553b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -268,7 +268,7 @@ static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i; + int i, j; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -280,8 +280,8 @@ static int vcn_v2_5_hw_fini(void *handle) ring->sched.ready = false; - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[i].ring_enc[i]; + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + ring = &adev->vcn.inst[i].ring_enc[j]; ring->sched.ready = false; } } -- cgit From d0d13fe874909542d2936056c0f8b36e70079570 Mon Sep 17 00:00:00 2001 From: Yintian Tao Date: Mon, 18 Nov 2019 16:06:00 +0800 Subject: drm/amdgpu: put flush_delayed_work at first MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is one regression from 042f3d7b745cd76aa To put flush_delayed_work after adev->shutdown = true which will make amdgpu_ih_process not response the irq At last, all ib ring tests will be failed just like below [drm] amdgpu: finishing device. [drm] Fence fallback timer expired on ring gfx [drm] Fence fallback timer expired on ring comp_1.0.0 [drm] Fence fallback timer expired on ring comp_1.1.0 [drm] Fence fallback timer expired on ring comp_1.2.0 [drm] Fence fallback timer expired on ring comp_1.3.0 [drm] Fence fallback timer expired on ring comp_1.0.1 amdgpu 0000:00:07.0: [drm:amdgpu_ib_ring_tests [amdgpu]] *ERROR* IB test failed on comp_1.1.1 (-110). 
amdgpu 0000:00:07.0: [drm:amdgpu_ib_ring_tests [amdgpu]] *ERROR* IB test failed on comp_1.2.1 (-110). amdgpu 0000:00:07.0: [drm:amdgpu_ib_ring_tests [amdgpu]] *ERROR* IB test failed on comp_1.3.1 (-110). amdgpu 0000:00:07.0: [drm:amdgpu_ib_ring_tests [amdgpu]] *ERROR* IB test failed on sdma0 (-110). amdgpu 0000:00:07.0: [drm:amdgpu_ib_ring_tests [amdgpu]] *ERROR* IB test failed on sdma1 (-110). amdgpu 0000:00:07.0: [drm:amdgpu_ib_ring_tests [amdgpu]] *ERROR* IB test failed on uvd_enc_0.0 (-110). amdgpu 0000:00:07.0: [drm:amdgpu_ib_ring_tests [amdgpu]] *ERROR* IB test failed on vce0 (-110). [drm:amdgpu_device_delayed_init_work_handler [amdgpu]] *ERROR* ib ring test failed (-110). v2: replace cancel_delayed_work_sync() with flush_delayed_work() Signed-off-by: Yintian Tao Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index cdd8ddab8f78..27ed48bde3fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3106,9 +3106,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) int r; DRM_INFO("amdgpu: finishing device.\n"); - adev->shutdown = true; - flush_delayed_work(&adev->delayed_init_work); + adev->shutdown = true; /* disable all interrupts */ amdgpu_irq_disable_all(adev); @@ -3127,7 +3126,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->firmware.gpu_info_fw = NULL; } adev->accel_working = false; - cancel_delayed_work_sync(&adev->delayed_init_work); /* free i2c buses */ if (!amdgpu_device_has_dc_support(adev)) amdgpu_i2c_fini(adev); -- cgit From 761e09230c4e2b4a9d2ab7a195d3fe3915758ad3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 15 Oct 2019 16:21:27 -0400 Subject: drm/amdgpu/soc15: move struct definition around to align with other soc15 asics Move reset_method next to reset callback to match the struct layout and the other definition in this file. Reviewed-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 689ffa6ede57..44add7509fb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1011,6 +1011,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .read_bios_from_rom = &soc15_read_bios_from_rom, .read_register = &soc15_read_register, .reset = &soc15_asic_reset, + .reset_method = &soc15_asic_reset_method, .set_vga_state = &soc15_vga_set_state, .get_xclk = &soc15_get_xclk, .set_uvd_clocks = &soc15_set_uvd_clocks, @@ -1023,7 +1024,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .get_pcie_usage = &vega20_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, - .reset_method = &soc15_asic_reset_method }; static int soc15_common_early_init(void *handle) -- cgit From 29bc37b410964e3a0233a048b9a33db13a9e08b6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 13 Nov 2019 14:27:54 -0500 Subject: drm/amdgpu/nv: add asic func for fetching vbios from rom directly Needed as a fallback if the vbios can't be fetched by other means. 
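The register-level implementation follows in the diff below (set ROM_INDEX to 0, then read the image out dword by dword through ROM_DATA). As a usage sketch, a caller on the vbios fetch path might look roughly like this; the wrapper name is invented for illustration, while the callback member and its signature are the ones the patch fills in:

static bool fetch_vbios_from_rom(struct amdgpu_device *adev, u32 len)
{
	/* Round up to whole dwords since the callback reads 32 bits at a time. */
	u8 *bios = kzalloc(ALIGN(len, 4), GFP_KERNEL);

	if (!bios)
		return false;

	if (!adev->asic_funcs->read_bios_from_rom(adev, bios, len)) {
		kfree(bios);
		return false;
	}

	adev->bios = bios;
	adev->bios_size = len;
	return true;
}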
Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index d2989e9484bd..343f47995657 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -40,6 +40,7 @@ #include "gc/gc_10_1_0_sh_mask.h" #include "hdp/hdp_5_0_0_offset.h" #include "hdp/hdp_5_0_0_sh_mask.h" +#include "smuio/smuio_11_0_0_offset.h" #include "soc15.h" #include "soc15_common.h" @@ -157,8 +158,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev) static bool nv_read_bios_from_rom(struct amdgpu_device *adev, u8 *bios, u32 length_bytes) { - /* TODO: will implement it when SMU header is available */ - return false; + u32 *dw_ptr; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + + /* set rom index to 0 */ + WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + /* read out the rom data */ + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + + return true; } static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { -- cgit From d3b65841b31c0192f997e0f9bc64dccbfaa97bcc Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 14 Nov 2019 20:36:24 +0800 Subject: drm/amd/display: remove set but not used variable 'old_plane_crtc' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c: In function dm_determine_update_type_for_commit: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c:6516:36: warning: variable old_plane_crtc set but not used [-Wunused-but-set-variable] It is introduced by commit a87fa9938749 ("drm/amd/display: Build stream update and plane updates in dm"), but never used, so remove it. 
Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c2700bc28c02..700cbd519f86 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7611,7 +7611,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, int i, j, num_plane, ret = 0; struct drm_plane_state *old_plane_state, *new_plane_state; struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; - struct drm_crtc *new_plane_crtc, *old_plane_crtc; + struct drm_crtc *new_plane_crtc; struct drm_plane *plane; struct drm_crtc *crtc; @@ -7657,7 +7657,6 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, uint64_t tiling_flags; new_plane_crtc = new_plane_state->crtc; - old_plane_crtc = old_plane_state->crtc; new_dm_plane_state = to_dm_plane_state(new_plane_state); old_dm_plane_state = to_dm_plane_state(old_plane_state); -- cgit From 589d8d282ebe1eab2dd8b1fba3e60322787a50e6 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 14 Nov 2019 20:36:25 +0800 Subject: drm/amd/display: remove set but not used variable 'bp' in bios_parser2.c Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c: In function bios_get_board_layout_info: drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c:1826:22: warning: variable bp set but not used [-Wunused-but-set-variable] It is introduced by commit 1eeedbcc20d6 ("drm/amd/display: get board layout for edid emulation"), but never used, so remove it. Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 6e29ba8e582e..9dc8d4e779ca 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1838,7 +1838,6 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; enum bp_result record_result; const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -1847,7 +1846,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n"); return BP_RESULT_BADINPUT; -- cgit From 7e30402bed151fc6222baafe5aa1abe3e65c3065 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 14 Nov 2019 20:36:26 +0800 Subject: drm/amd/display: remove set but not used variable 'bp' in bios_parser.c Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/display/dc/bios/bios_parser.c: In function bios_get_board_layout_info: drivers/gpu/drm/amd/display/dc/bios/bios_parser.c:2743:22: warning: variable bp set but not used [-Wunused-but-set-variable] It is introduced by commit 1eeedbcc20d6 ("drm/amd/display: get board layout for edid emulation"), but never used, so remove it. 
Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 823843cd2613..c34797cb4d2d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2739,7 +2739,6 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; enum bp_result record_result; const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -2748,7 +2747,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n"); return BP_RESULT_BADINPUT; -- cgit From 8f72bfe8d85a827f638141d0f07de42d0c24a36f Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 14 Nov 2019 20:36:27 +0800 Subject: drm/amd/display: remove set but not used variable 'min_content' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/amd/display/modules/color/color_gamma.c: In function build_freesync_hdr: drivers/gpu/drm/amd/display/modules/color/color_gamma.c:830:20: warning: variable min_content set but not used [-Wunused-but-set-variable] It is not used since commit 50575eb5b339 ("drm/amd/display: Only use EETF when maxCL > max display") Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 1de4805cb8c7..9b121b08c806 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -937,7 +937,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, struct fixed31_32 max_display; struct fixed31_32 min_display; struct fixed31_32 max_content; - struct fixed31_32 min_content; struct fixed31_32 clip = dc_fixpt_one; struct fixed31_32 output; bool use_eetf = false; @@ -951,7 +950,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, max_display = dc_fixpt_from_int(fs_params->max_display); min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); max_content = dc_fixpt_from_int(fs_params->max_content); - min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); if (fs_params->min_display > 1000) // cap at 0.1 at the bottom -- cgit From 852a91d627e9ce849d68df9d3f5336689003bdc7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Nov 2019 20:44:13 +0000 Subject: drm/amdgpu/dm: Do not throw an error for a display with no audio An old display with no audio may not have an EDID with a CEA block, or it may simply be too old to support audio. This is not a driver error, so don't flag it as such. 
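The resulting behaviour in dm_helpers_parse_edid_caps() is simply to treat a missing or empty CEA audio block as "no audio" and return early, as in this minimal sketch (identifiers taken from the diff below):

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;	/* no SADs: the sink has no audio, not a driver error */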
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112140 References: ae2a3495973e ("drm/amd: be quiet when no SAD block is found") Signed-off-by: Chris Wilson Cc: Harry Wentland Cc: Jean Delvare Cc: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 72e677796a48..66f266a5e10b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -97,8 +97,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps( (struct edid *) edid->raw_edid); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); - if (sad_count < 0) - DRM_ERROR("Couldn't read SADs: %d\n", sad_count); if (sad_count <= 0) return result; -- cgit From 8c8048f207e785707270bc4985d8d4e1673eefb8 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Fri, 26 Jul 2019 11:25:43 -0400 Subject: drm/amd/display: add automated audio test support Signed-off-by: abdoulaye berthe Reviewed-by: Wenjing Liu Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 92 ++++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 48 +++++++++++-- drivers/gpu/drm/amd/display/dc/dc_link.h | 1 + 3 files changed, 134 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b814b749724b..b72db01afeed 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2546,6 +2546,92 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) 0); } +static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) +{ + union audio_test_mode dpcd_test_mode = {0}; + struct audio_test_pattern_type dpcd_pattern_type = {0}; + union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = &pipes[0]; + unsigned int channel_count; + unsigned int channel = 0; + unsigned int modes = 0; + unsigned int sampling_rate_in_hz = 0; + + // get audio test mode and test pattern parameters + core_link_read_dpcd( + link, + DP_TEST_AUDIO_MODE, + &dpcd_test_mode.raw, + sizeof(dpcd_test_mode)); + + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PATTERN_TYPE, + &dpcd_pattern_type.value, + sizeof(dpcd_pattern_type)); + + channel_count = dpcd_test_mode.bits.channel_count + 1; + + // read pattern periods for requested channels when sawTooth pattern is requested + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || + dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { + + test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? 
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + // read period for each channel + for (channel = 0; channel < channel_count; channel++) { + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PERIOD_CH1 + channel, + &dpcd_pattern_period[channel].raw, + sizeof(dpcd_pattern_period[channel])); + } + } + + // translate sampling rate + switch (dpcd_test_mode.bits.sampling_rate) { + case AUDIO_SAMPLING_RATE_32KHZ: + sampling_rate_in_hz = 32000; + break; + case AUDIO_SAMPLING_RATE_44_1KHZ: + sampling_rate_in_hz = 44100; + break; + case AUDIO_SAMPLING_RATE_48KHZ: + sampling_rate_in_hz = 48000; + break; + case AUDIO_SAMPLING_RATE_88_2KHZ: + sampling_rate_in_hz = 88200; + break; + case AUDIO_SAMPLING_RATE_96KHZ: + sampling_rate_in_hz = 96000; + break; + case AUDIO_SAMPLING_RATE_176_4KHZ: + sampling_rate_in_hz = 176400; + break; + case AUDIO_SAMPLING_RATE_192KHZ: + sampling_rate_in_hz = 192000; + break; + default: + sampling_rate_in_hz = 0; + break; + } + + link->audio_test_data.flags.test_requested = 1; + link->audio_test_data.flags.disable_video = disable_video; + link->audio_test_data.sampling_rate = sampling_rate_in_hz; + link->audio_test_data.channel_count = channel_count; + link->audio_test_data.pattern_type = test_pattern; + + if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { + for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { + link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; + } + } +} + static void handle_automated_test(struct dc_link *link) { union test_request test_request; @@ -2575,6 +2661,12 @@ static void handle_automated_test(struct dc_link *link) dp_test_send_link_test_pattern(link); test_response.bits.ACK = 1; } + + if (test_request.bits.AUDIO_TEST_PATTERN) { + dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); + test_response.bits.ACK = 1; + } + if (test_request.bits.PHY_TEST_PATTERN) { dp_test_send_phy_test_pattern(link); test_response.bits.ACK = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 4d3378d61938..1b68d7c13085 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -469,13 +469,13 @@ union training_aux_rd_interval { /* Automated test structures */ union test_request { struct { - uint8_t LINK_TRAINING :1; - uint8_t LINK_TEST_PATTRN :1; - uint8_t EDID_READ :1; - uint8_t PHY_TEST_PATTERN :1; - uint8_t AUDIO_TEST_PATTERN :1; - uint8_t RESERVED :1; - uint8_t TEST_STEREO_3D :1; + uint8_t LINK_TRAINING :1; + uint8_t LINK_TEST_PATTRN :1; + uint8_t EDID_READ :1; + uint8_t PHY_TEST_PATTERN :1; + uint8_t RESERVED :1; + uint8_t AUDIO_TEST_PATTERN :1; + uint8_t TEST_AUDIO_DISABLED_VIDEO :1; } bits; uint8_t raw; }; @@ -534,6 +534,40 @@ union test_misc { unsigned char raw; }; +union audio_test_mode { + struct { + unsigned char sampling_rate :4; + unsigned char channel_count :4; + } bits; + unsigned char raw; +}; + +union audio_test_pattern_period { + struct { + unsigned char pattern_period :4; + unsigned char reserved :4; + } bits; + unsigned char raw; +}; + +struct audio_test_pattern_type { + unsigned char value; +}; + +struct dp_audio_test_data_flags { + uint8_t test_requested :1; + uint8_t disable_video :1; +}; + +struct dp_audio_test_data { + + struct dp_audio_test_data_flags flags; + uint8_t sampling_rate; + uint8_t channel_count; + uint8_t pattern_type; + uint8_t pattern_period[8]; +}; + /* FEC capability DPCD register field bits-*/ 
union dpcd_fec_capability { struct { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 8971ce3a5480..314d2043cd78 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -94,6 +94,7 @@ struct dc_link { struct dc_lane_settings cur_lane_setting; struct dc_link_settings preferred_link_setting; struct dc_link_training_overrides preferred_training_settings; + struct dp_audio_test_data audio_test_data; uint8_t ddc_hw_inst; -- cgit From e6b268dde4cdc4d0b35eb7997520327183957294 Mon Sep 17 00:00:00 2001 From: Joseph Gravenor Date: Thu, 17 Oct 2019 11:56:34 -0400 Subject: drm/amd/display: Renoir chroma viewport WA change formula [why] we want to increase the pte row plus 1 line if chroma viewport height is integer multiple of the pte row height [how] instead of ceiling viewport height, we floor it. this allows us to accommodate both cases: those where the chroma viewport height is integer multiple of the pte row height and those where it is not Signed-off-by: Joseph Gravenor Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 1ddd6ae22155..d86b6b6211bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -204,8 +204,8 @@ void hubp21_set_viewport( PTE_ROW_HEIGHT_LINEAR, &pte_row_height); pte_row_height = 1 << (pte_row_height + 3); - pte_rows = (viewport_c->height + pte_row_height - 1) / pte_row_height; - patched_viewport_height = pte_rows * pte_row_height + 3; + pte_rows = (viewport_c->height / pte_row_height) + 1; + patched_viewport_height = pte_rows * pte_row_height + 1; } -- cgit From db8ff9d38c18bed7dad5bf757d6e141603922c2b Mon Sep 17 00:00:00 2001 From: Joseph Gravenor Date: Thu, 24 Oct 2019 13:55:10 -0400 Subject: drm/amd/display: Renoir chroma viewport WA Read the correct register [why] Before we were reading registers specific to luma size, which caused a black line to appear on the screen from time to time, as although the luma row height is generally the same as the chroma row height for the video case, it will sometimes be one more [how] Read the register specific for the chroma size Signed-off-by: Joseph Gravenor Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index d86b6b6211bc..32e8b589aeb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -200,8 +200,8 @@ void hubp21_set_viewport( int pte_row_height = 0; int pte_rows = 0; - REG_GET(DCHUBP_REQ_SIZE_CONFIG, - PTE_ROW_HEIGHT_LINEAR, &pte_row_height); + REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, + PTE_ROW_HEIGHT_LINEAR_C, &pte_row_height); pte_row_height = 1 << (pte_row_height + 3); pte_rows = (viewport_c->height / pte_row_height) + 1; -- cgit From d3698cea22c6c203bdb95a5330664c0e5dfd0829 Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Fri, 25 Oct 2019 09:40:13 -0400 Subject: drm/amd/display: Add hubp clock status in DTN log 
for Navi [Why] For debug purpose, we need to check HUBP_CLOCK_ENABLE in DTN log debugfs on Navi. [How] Add related register read in dcn20_hubp.c. Signed-off-by: Leo (Hanghong) Ma Reviewed-by: Harry Wentland Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index f04325604f6c..7d9ffb81584a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1202,6 +1202,9 @@ void hubp2_read_state_common(struct hubp *hubp) HUBP_TTU_DISABLE, &s->ttu_disable, HUBP_UNDERFLOW_STATUS, &s->underflow_status); + REG_GET(HUBP_CLK_CNTL, + HUBP_CLOCK_ENABLE, &s->clock_en); + REG_GET(DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, &s->min_ttu_vblank); -- cgit From 9b265eba45d34373c7814187d9bcc8df80bffd70 Mon Sep 17 00:00:00 2001 From: Hugo Hu Date: Fri, 25 Oct 2019 15:33:15 +0800 Subject: drm/amd/display: Update background color in bottommost mpcc [Why] Background color only takes effect in bottommost mpcc. [How] Update background color in bottommost mpcc. Signed-off-by: Hugo Hu Reviewed-by: Yongqiang Sun Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 220154f7911a..04f863499cfb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc, int mpcc_id) { struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id); + uint32_t bg_r_cr, bg_g_y, bg_b_cb; + + /* find bottommost mpcc. */ + while (bottommost_mpcc->mpcc_bot) { + bottommost_mpcc = bottommost_mpcc->mpcc_bot; + } /* mpc color is 12 bit. tg_color is 10 bit */ /* todo: might want to use 16 bit to represent color and have each * hw block translate to correct color depth. */ - uint32_t bg_r_cr = bg_color->color_r_cr << 2; - uint32_t bg_g_y = bg_color->color_g_y << 2; - uint32_t bg_b_cb = bg_color->color_b_cb << 2; + bg_r_cr = bg_color->color_r_cr << 2; + bg_g_y = bg_color->color_g_y << 2; + bg_b_cb = bg_color->color_b_cb << 2; - REG_SET(MPCC_BG_R_CR[mpcc_id], 0, + REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0, MPCC_BG_R_CR, bg_r_cr); - REG_SET(MPCC_BG_G_Y[mpcc_id], 0, + REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0, MPCC_BG_G_Y, bg_g_y); - REG_SET(MPCC_BG_B_CB[mpcc_id], 0, + REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0, MPCC_BG_B_CB, bg_b_cb); } -- cgit From 1bc22f20ae24f60ad162457913504f26b724c621 Mon Sep 17 00:00:00 2001 From: Stylon Wang Date: Fri, 20 Sep 2019 15:40:55 +0800 Subject: drm/amd/display: Fix incorrect deep color setting in YCBCR420 modes [Why] HDMI 2.0 HF-VSDB in EDID defines supported color depths in YCBCR420 modes. But we did not honor these bit masks when choosing pixel encoding. HDMI 2.0 compliance tests with deep color and YCBCR420 failed as a result. [How] Cap color depth based on y420_dc_modes from EDID. 
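The cap lands in convert_color_depth_from_display_info() in the diff below. As a compact sketch of the rule, using the y420_dc_modes field and DRM_EDID_YCBCR420_DC_* masks the patch reads (the helper name itself is made up for illustration):

static uint8_t y420_max_bpc(const struct drm_display_info *info)
{
	/* Pick the deepest colour depth the sink advertises for YCbCr 4:2:0
	 * in the HDMI 2.0 HF-VSDB, falling back to 8 bpc when no deep-colour
	 * bit is set.
	 */
	if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
		return 16;
	if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
		return 12;
	if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
		return 10;
	return 8;
}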
Signed-off-by: Stylon Wang Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ++++++++++++++++++----- 1 file changed, 25 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 700cbd519f86..11b9c65e1ee8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3539,12 +3539,26 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, - const struct drm_connector_state *state) + const struct drm_connector_state *state, + bool is_y420) { - uint8_t bpc = (uint8_t)connector->display_info.bpc; + uint8_t bpc; - /* Assume 8 bpc by default if no bpc is specified. */ - bpc = bpc ? bpc : 8; + if (is_y420) { + bpc = 8; + + /* Cap display bpc based on HDMI 2.0 HF-VSDB */ + if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) + bpc = 16; + else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) + bpc = 12; + else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) + bpc = 10; + } else { + bpc = (uint8_t)connector->display_info.bpc; + /* Assume 8 bpc by default if no bpc is specified. */ + bpc = bpc ? bpc : 8; + } if (!state) state = connector->state; @@ -3715,7 +3729,8 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; timing_out->display_color_depth = convert_color_depth_from_display_info( - connector, connector_state); + connector, connector_state, + (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)); timing_out->scan_type = SCANNING_TYPE_NODATA; timing_out->hdmi_vic = 0; @@ -4847,6 +4862,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, struct drm_dp_mst_port *mst_port; enum dc_color_depth color_depth; int clock, bpp = 0; + bool is_y420 = false; if (!aconnector->port || !aconnector->dc_sink) return 0; @@ -4858,7 +4874,10 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, return 0; if (!state->duplicated) { - color_depth = convert_color_depth_from_display_info(connector, conn_state); + is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && + aconnector->force_yuv420_output; + color_depth = convert_color_depth_from_display_info(connector, conn_state, + is_y420); bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; clock = adjusted_mode->clock; dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp); -- cgit From 7b23b0b450720ab85f582d2c7692172b41d74457 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 28 Oct 2019 08:50:33 -0400 Subject: drm/amd/display: 3.2.59 Signed-off-by: Aric Cyr Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 2e6b3ecd564d..4c6c2fcc6a96 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.58" +#define DC_VER "3.2.59" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From 
8d8a6af71a75e09ce5796b4ae780818865832c50 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Wed, 23 Oct 2019 21:36:29 -0400 Subject: drm/amd/display: Fix stereo with DCC enabled [Why] When sending DCC with Stereo, DCC gets enabled but the meta addresses are 0. This happens momentarily before the meta addresses are populated with a valid address. [How] Add call validate_dcc_with_meta_address() in copy_surface_update_to_plane() to check for surface address and DCC change. When DCC has changed, check if DCC enable is true but meta address is 0. If so, we turn DCC enable to false. When surface address has changed, we check if DCC enable is false but meta address is not 0. If so, we turn DCC enable back to true. This will restore DCC enable to the proper setting once the meta address is valid. Signed-off-by: Samson Tam Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7f796a4c73d4..c3a315f1d5f8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1747,12 +1747,37 @@ static struct dc_stream_status *stream_get_status( static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; +static void validate_dcc_with_meta_address( + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address) +{ + if ((address->grph.meta_addr.quad_part == 0) && + dcc->enable) { + ASSERT(!dcc->enable); + dcc->enable = false; + } else if ((address->grph.meta_addr.quad_part != 0) && + !dcc->enable) + dcc->enable = true; + + if (address->type != PLN_ADDR_TYPE_GRAPHICS) { + if ((address->grph_stereo.right_meta_addr.quad_part == 0) && + dcc->enable) { + ASSERT(!dcc->enable); + dcc->enable = false; + } else if ((address->grph_stereo.right_meta_addr.quad_part != 0) && + !dcc->enable) + dcc->enable = true; + } +} + static void copy_surface_update_to_plane( struct dc_plane_state *surface, struct dc_surface_update *srf_update) { if (srf_update->flip_addr) { surface->address = srf_update->flip_addr->address; + validate_dcc_with_meta_address(&surface->dcc, &surface->address); + surface->flip_immediate = srf_update->flip_addr->flip_immediate; surface->time.time_elapsed_in_us[surface->time.index] = @@ -1801,6 +1826,8 @@ static void copy_surface_update_to_plane( srf_update->plane_info->global_alpha_value; surface->dcc = srf_update->plane_info->dcc; + validate_dcc_with_meta_address(&surface->dcc, &surface->address); + surface->sdr_white_level = srf_update->plane_info->sdr_white_level; surface->layer_index = -- cgit From 2f4888840090329d0369daad72c3b8ff84ce647a Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 24 Oct 2019 15:45:44 -0400 Subject: drm/amd/display: Changes in dc to allow full update in some cases Changes in dc to allow for different cases where full update is required. 
Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 22 +++++++++++++--------- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 2 +- .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 11 ++++++----- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +- 4 files changed, 21 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 3d5a79ff1151..f69b45eeb766 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1773,10 +1773,11 @@ void dcn20_populate_dml_writeback_from_context( } int dcn20_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) { int pipe_cnt, i; bool synchronized_vblank = true; + struct resource_context *res_ctx = &context->res_ctx; for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) @@ -1796,10 +1797,13 @@ int dcn20_populate_dml_pipes_from_context( for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; + unsigned int v_total; int output_bpc; if (!res_ctx->pipe_ctx[i].stream) continue; + + v_total = timing->v_total; /* todo: pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; pipes[pipe_cnt].pipe.src.dcc = 0; @@ -1812,7 +1816,7 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; /* 1/2 vblank */ pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = - (timing->v_total - timing->v_addressable + (v_total - timing->v_addressable - timing->v_border_top - timing->v_border_bottom) / 2; /* 36 bytes dp, 32 hdmi */ pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = @@ -1826,13 +1830,13 @@ int dcn20_populate_dml_pipes_from_context( - timing->h_addressable - timing->h_border_left - timing->h_border_right; - pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch; + pipes[pipe_cnt].pipe.dest.vblank_start = v_total - timing->v_front_porch; pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom; pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; - pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; + pipes[pipe_cnt].pipe.dest.vtotal = v_total; pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable; pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable; pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; @@ -1967,8 +1971,8 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; pipes[pipe_cnt].pipe.src.is_hsplit = 0; pipes[pipe_cnt].pipe.dest.odm_combine = 0; - pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total; - pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total; + pipes[pipe_cnt].pipe.dest.vtotal_min = v_total; + pipes[pipe_cnt].pipe.dest.vtotal_max = v_total; } else { struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; @@ -2430,7 +2434,7 @@ bool dcn20_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, 
pipes); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes); *pipe_cnt_out = pipe_cnt; @@ -2576,10 +2580,10 @@ static void dcn20_calculate_wm( if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); + context, pipes); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); + context, pipes); } *out_pipe_cnt = pipe_cnt; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index fa00989584dc..840ca66c34e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -50,7 +50,7 @@ unsigned int dcn20_calc_max_scaled_time( enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); int dcn20_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( struct dc_state *state, const struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 39321b2a5504..8e69346ff27c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -650,7 +650,7 @@ static const struct dcn10_stream_encoder_mask se_mask = { static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); static int dcn21_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); static struct input_pixel_processor *dcn21_ipp_create( struct dc_context *ctx, uint32_t inst) @@ -1053,10 +1053,10 @@ void dcn21_calculate_wm( if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); + context, pipes); else pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); + context, pipes); } *out_pipe_cnt = pipe_cnt; @@ -1591,10 +1591,11 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) } static int dcn21_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) { - uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes); + uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes); int i; + struct resource_context *res_ctx = &context->res_ctx; for (i = 0; i < dc->res_pool->pipe_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index e0aac234537f..16f6ef22367b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -101,7 +101,7 @@ struct resource_funcs { int (*populate_dml_pipes)( struct dc *dc, - struct resource_context *res_ctx, + struct dc_state *context, display_e2e_pipe_params_st *pipes); enum dc_status (*validate_global)( -- cgit From c09eeee4f3a704ba4b5b743fdc34520f2e9d503d Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 29 Oct 2019 14:23:55 -0400 Subject: drm/amd/display: Add DMUB service function check if hw 
initialized [Why] We want to avoid reprogramming the cache window when possible. We don't need to worry about it for S3 but we *do* need to worry about it for S4 resume. DM can check whether hardware should be reinitialized or store software state when going to S4 to know whether we need to reprogram hardware. [How] Add helpers to the DMUB service to check hardware initialization state. DM will hook it up later. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 11 +++++++++++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 5 +++++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 2 ++ drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 14 ++++++++++++++ 4 files changed, 32 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index 45e427d1952e..6f3eca266694 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -252,6 +252,8 @@ struct dmub_srv_hw_funcs { bool (*is_supported)(struct dmub_srv *dmub); + bool (*is_hw_init)(struct dmub_srv *dmub); + bool (*is_phy_init)(struct dmub_srv *dmub); bool (*is_auto_load_done)(struct dmub_srv *dmub); @@ -380,6 +382,15 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, bool *is_supported); +/** + * dmub_srv_is_hw_init() - returns hardware init state + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init); + /** * dmub_srv_hw_init() - initializes the underlying DMUB hardware * @dmub: the dmub service diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 236a4156bbe1..89fd27758dd5 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -122,6 +122,11 @@ void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset) REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset); } +bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0; +} + bool dmub_dcn20_is_supported(struct dmub_srv *dmub) { uint32_t supported = 0; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index 41269da40363..e1ba748ca594 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -55,6 +55,8 @@ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); +bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub); + bool dmub_dcn20_is_supported(struct dmub_srv *dmub); bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 229eab7277d1..2d63ae80bda9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -76,6 +76,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; funcs->is_supported = dmub_dcn20_is_supported; funcs->is_phy_init = dmub_dcn20_is_phy_init; + funcs->is_hw_init = 
dmub_dcn20_is_hw_init; if (asic == DMUB_ASIC_DCN21) { funcs->backdoor_load = dmub_dcn21_backdoor_load; @@ -234,6 +235,19 @@ enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init) +{ + *is_hw_init = false; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.is_hw_init) + *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { -- cgit From ab16c7363df1827599fd726b7467038d146c154e Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 30 Oct 2019 09:02:39 -0400 Subject: drm/amd/display: Add DMUB param to load inst const from driver [Why] By default we shouldn't be trying to write secure registers during DMUB hardware init. [How] Add a parameter to control whether we put the DMCUB into secure reset and attempt to load CW0/CW1. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 2 ++ drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index 6f3eca266694..fdedbe15e026 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -281,12 +281,14 @@ struct dmub_srv_create_params { * @fb_base: base of the framebuffer aperture * @fb_offset: offset of the framebuffer aperture * @psp_version: psp version to pass for DMCU init + * @load_inst_const: true if DMUB should load inst const fw */ struct dmub_srv_hw_params { struct dmub_fb *fb[DMUB_WINDOW_TOTAL]; uint64_t fb_base; uint64_t fb_offset; uint32_t psp_version; + bool load_inst_const; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 2d63ae80bda9..0dd32edbbcb3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -278,7 +278,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, cw1.region.base = DMUB_CW1_BASE; cw1.region.top = cw1.region.base + stack_fb->size - 1; - if (dmub->hw_funcs.backdoor_load) + if (params->load_inst_const && dmub->hw_funcs.backdoor_load) dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1); } -- cgit From 5b956e9873cb3658fd29985c446404e3bc7ae5a7 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Tue, 29 Oct 2019 11:43:05 -0400 Subject: drm/amd/display: Add debugfs initialization on mst connectors [why] We were missing debugfs files on MST connectors as the files weren't initialized. [how] Move connector debugfs initialization into connector's init helper function so it will be called by both SST and MST connectors. Also move connector registration so the connector is registered before we create the entries.
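A minimal sketch of the resulting call site, assuming the helper and field names used in this series (connector_debugfs_init(), debugfs_dpcd_address, debugfs_dpcd_size); the wrapper function name is hypothetical and only illustrates that both SST and MST connectors now reach the same debugfs setup through the shared init helper, see the actual hunks in the diff below.

/* Sketch only: hypothetical wrapper called from the shared connector init
 * helper so SST and MST connectors both get debugfs entries; guarded
 * because debugfs support may be compiled out.
 */
#if defined(CONFIG_DEBUG_FS)
static void dm_connector_debugfs_setup(struct amdgpu_dm_connector *aconnector)
{
	connector_debugfs_init(aconnector);
	aconnector->debugfs_dpcd_address = 0;
	aconnector->debugfs_dpcd_size = 0;
}
#else
static inline void dm_connector_debugfs_setup(struct amdgpu_dm_connector *aconnector) {}
#endif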
Signed-off-by: Mikita Lipski Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 11b9c65e1ee8..c4b9655640b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5584,6 +5584,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_content_protection_property(&aconnector->base, false); #endif } + +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(aconnector); + aconnector->debugfs_dpcd_address = 0; + aconnector->debugfs_dpcd_size = 0; +#endif } static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, @@ -5706,6 +5712,8 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, &aconnector->base, &amdgpu_dm_connector_helper_funcs); + drm_connector_register(&aconnector->base); + amdgpu_dm_connector_init_helper( dm, aconnector, @@ -5716,13 +5724,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, drm_connector_attach_encoder( &aconnector->base, &aencoder->base); - drm_connector_register(&aconnector->base); -#if defined(CONFIG_DEBUG_FS) - connector_debugfs_init(aconnector); - aconnector->debugfs_dpcd_address = 0; - aconnector->debugfs_dpcd_size = 0; -#endif - if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) amdgpu_dm_initialize_dp_connector(dm, aconnector); -- cgit From cbd8394d3a97f6e7457f561bab5abc9c41b664e9 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Tue, 29 Oct 2019 15:49:28 -0400 Subject: drm/amd/display: Connect DIG FE to its BE before link training starts [why] In SST mode no idle pattern will be generated after link training if DIG FE is not connected to DIG BE. 
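A minimal sketch of the ordering this fix establishes in enable_link_dp(): hook the DIG back end to its front end before link training starts, so the SST idle pattern can be emitted immediately after training. The encoder callback and field names are taken from the diff that follows; the surrounding function context is assumed, not reproduced.

	/* Sketch only: connect DIG BE to its FE ahead of link training */
	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
			pipe_ctx->stream_res.stream_enc->id,
			true);
	/* ...link training runs next; idle pattern follows right after it */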
Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 1be427759771..49f3d0f67806 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1546,6 +1546,10 @@ static enum dc_status enable_link_dp( panel_mode = dp_get_panel_mode(link); dp_set_panel_mode(link, panel_mode); + /* We need to do this before the link training to ensure the idle pattern in SST + * mode will be sent right after the link training */ + link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, + pipe_ctx->stream_res.stream_enc->id, true); skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) -- cgit From 581c4488318d5e41479521cf7ba63f877e5246fd Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Mon, 28 Oct 2019 11:45:14 -0400 Subject: drm/amd/display: Clean up some code with unused registers [Why] Unused register in the code [How] Remove unused register Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index ebe8f9a21be2..bff03a68aa01 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -792,8 +792,7 @@ struct dce_hwseq_registers { type D2VGA_MODE_ENABLE; \ type D3VGA_MODE_ENABLE; \ type D4VGA_MODE_ENABLE; \ - type AZALIA_AUDIO_DTO_MODULE;\ - type HPO_HDMISTREAMCLK_GATE_DIS; + type AZALIA_AUDIO_DTO_MODULE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) -- cgit From be32c9891c13fe26e9f7fb4020b2adc40ca2dec3 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Thu, 31 Oct 2019 15:27:28 -0400 Subject: drm/amd/display: revert change causing DTN hang for RV [Why] Hanging on RV for DTN driver verifier [How] Roll back change and investigate further Signed-off-by: Samson Tam Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 27 --------------------------- 1 file changed, 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c3a315f1d5f8..7f796a4c73d4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1747,37 +1747,12 @@ static struct dc_stream_status *stream_get_status( static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; -static void validate_dcc_with_meta_address( - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) -{ - if ((address->grph.meta_addr.quad_part == 0) && - dcc->enable) { - ASSERT(!dcc->enable); - dcc->enable = false; - } else if ((address->grph.meta_addr.quad_part != 0) && - !dcc->enable) - dcc->enable = true; - - if (address->type != PLN_ADDR_TYPE_GRAPHICS) { - if ((address->grph_stereo.right_meta_addr.quad_part == 0) && - dcc->enable) { - ASSERT(!dcc->enable); - dcc->enable = false; - } else if ((address->grph_stereo.right_meta_addr.quad_part != 0) && - !dcc->enable) - dcc->enable = true; - } -} - static void 
copy_surface_update_to_plane( struct dc_plane_state *surface, struct dc_surface_update *srf_update) { if (srf_update->flip_addr) { surface->address = srf_update->flip_addr->address; - validate_dcc_with_meta_address(&surface->dcc, &surface->address); - surface->flip_immediate = srf_update->flip_addr->flip_immediate; surface->time.time_elapsed_in_us[surface->time.index] = @@ -1826,8 +1801,6 @@ static void copy_surface_update_to_plane( srf_update->plane_info->global_alpha_value; surface->dcc = srf_update->plane_info->dcc; - validate_dcc_with_meta_address(&surface->dcc, &surface->address); - surface->sdr_white_level = srf_update->plane_info->sdr_white_level; surface->layer_index = -- cgit From 03527f0d0056f5605863d894615a0533afc34c7c Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Thu, 31 Oct 2019 16:09:01 -0400 Subject: drm/amd/display: Fix debugfs on MST connectors [why] Previous patch allowed to initialize debugfs entries on both MST and SST connectors, but MST connectors get registered much later which exposed an issue of debugfs entries being initialized in the same folder. [how] Return SST debugfs entries' initialization back to where it was. For MST connectors we should initialize debugfs entries in connector register function after the connector is registered. Signed-off-by: Mikita Lipski Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15 +++++++-------- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 10 +++++++++- 2 files changed, 16 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c4b9655640b8..11b9c65e1ee8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5584,12 +5584,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_content_protection_property(&aconnector->base, false); #endif } - -#if defined(CONFIG_DEBUG_FS) - connector_debugfs_init(aconnector); - aconnector->debugfs_dpcd_address = 0; - aconnector->debugfs_dpcd_size = 0; -#endif } static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, @@ -5712,8 +5706,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, &aconnector->base, &amdgpu_dm_connector_helper_funcs); - drm_connector_register(&aconnector->base); - amdgpu_dm_connector_init_helper( dm, aconnector, @@ -5724,6 +5716,13 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, drm_connector_attach_encoder( &aconnector->base, &aencoder->base); + drm_connector_register(&aconnector->base); +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(aconnector); + aconnector->debugfs_dpcd_address = 0; + aconnector->debugfs_dpcd_size = 0; +#endif + if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) amdgpu_dm_initialize_dp_connector(dm, aconnector); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index ba508a897263..205531ca686f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,7 +36,9 @@ #include "dc_link_ddc.h" #include "i2caux_interface.h" - +#if defined(CONFIG_DEBUG_FS) +#include "amdgpu_dm_debugfs.h" +#endif /* #define TRACE_DPCD */ #ifdef TRACE_DPCD @@ 
-147,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); struct drm_dp_mst_port *port = amdgpu_dm_connector->port; +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(amdgpu_dm_connector); + amdgpu_dm_connector->debugfs_dpcd_address = 0; + amdgpu_dm_connector->debugfs_dpcd_size = 0; +#endif + return drm_dp_mst_connector_late_register(connector, port); } -- cgit From d9e32672a1285d6c5e06bedaabb465441c172aa8 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 31 Oct 2019 21:39:39 -0400 Subject: drm/amd/display: cleanup of construct and destruct funcs [Why] Too many construct functions which makes searching difficult, especially on some debuggers. [How] Append all construct and destruct functions with dcn number and object type to make each construct function name unique Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 4 +-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 8 +++--- drivers/gpu/drm/amd/display/dc/core/dc.c | 10 +++---- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 8 +++--- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 8 +++--- drivers/gpu/drm/amd/display/dc/core/dc_sink.c | 8 +++--- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 8 +++--- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 8 +++--- .../drm/amd/display/dc/dce100/dce100_resource.c | 10 +++---- .../drm/amd/display/dc/dce110/dce110_resource.c | 10 +++---- .../drm/amd/display/dc/dce112/dce112_resource.c | 10 +++---- .../drm/amd/display/dc/dce120/dce120_resource.c | 10 +++---- .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 10 +++---- .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 10 +++---- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 10 +++---- .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +++---- drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c | 12 ++++---- drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c | 23 +++++----------- drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c | 32 ++++++---------------- .../amd/display/dc/irq/dce110/irq_service_dce110.c | 4 +-- .../amd/display/dc/irq/dce120/irq_service_dce120.c | 4 +-- .../amd/display/dc/irq/dce80/irq_service_dce80.c | 4 +-- .../amd/display/dc/irq/dcn10/irq_service_dcn10.c | 4 +-- .../amd/display/dc/irq/dcn20/irq_service_dcn20.c | 4 +-- .../amd/display/dc/irq/dcn21/irq_service_dcn21.c | 4 +-- 25 files changed, 104 insertions(+), 129 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index c34797cb4d2d..27451f2a938b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create( return NULL; } -static void destruct(struct bios_parser *bp) +static void bios_parser_destruct(struct bios_parser *bp) { kfree(bp->base.bios_local_image); kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb) return; } - destruct(bp); + bios_parser_destruct(bp); kfree(bp); *dcb = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 9dc8d4e779ca..eb06ee765c78 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -111,7 +111,7 @@ static struct atom_encoder_caps_record 
*get_encoder_cap_record( #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table) -static void destruct(struct bios_parser *bp) +static void bios_parser2_destruct(struct bios_parser *bp) { kfree(bp->base.bios_local_image); kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb) return; } - destruct(bp); + bios_parser2_destruct(bp); kfree(bp); *dcb = NULL; @@ -1925,7 +1925,7 @@ static const struct dc_vbios_funcs vbios_funcs = { .get_board_layout_info = bios_get_board_layout_info, }; -static bool bios_parser_construct( +static bool bios_parser2_construct( struct bios_parser *bp, struct bp_init_data *init, enum dce_version dce_version) @@ -2018,7 +2018,7 @@ struct dc_bios *firmware_parser_create( if (!bp) return NULL; - if (bios_parser_construct(bp, init, dce_version)) + if (bios_parser2_construct(bp, init, dce_version)) return &bp->base; kfree(bp); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7f796a4c73d4..6089c90ad6cd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -533,7 +533,7 @@ void dc_stream_set_static_screen_events(struct dc *dc, dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events); } -static void destruct(struct dc *dc) +static void dc_destruct(struct dc *dc) { if (dc->current_state) { dc_release_state(dc->current_state); @@ -579,7 +579,7 @@ static void destruct(struct dc *dc) } -static bool construct(struct dc *dc, +static bool dc_construct(struct dc *dc, const struct dc_init_data *init_params) { struct dc_context *dc_ctx; @@ -729,7 +729,7 @@ static bool construct(struct dc *dc, fail: - destruct(dc); + dc_destruct(dc); return false; } @@ -795,7 +795,7 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (NULL == dc) goto alloc_fail; - if (false == construct(dc, init_params)) + if (false == dc_construct(dc, init_params)) goto construct_fail; full_pipe_count = dc->res_pool->pipe_count; @@ -852,7 +852,7 @@ void dc_deinit_callbacks(struct dc *dc) void dc_destroy(struct dc **dc) { - destruct(*dc); + dc_destruct(*dc); kfree(*dc); *dc = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 49f3d0f67806..cfb8f9010673 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -74,7 +74,7 @@ enum { /******************************************************************************* * Private functions ******************************************************************************/ -static void destruct(struct dc_link *link) +static void dc_link_destruct(struct dc_link *link) { int i; @@ -1244,7 +1244,7 @@ static enum transmitter translate_encoder_to_transmitter( } } -static bool construct( +static bool dc_link_construct( struct dc_link *link, const struct link_init_data *init_params) { @@ -1446,7 +1446,7 @@ struct dc_link *link_create(const struct link_init_data *init_params) if (NULL == link) goto alloc_fail; - if (false == construct(link, init_params)) + if (false == dc_link_construct(link, init_params)) goto construct_fail; return link; @@ -1460,7 +1460,7 @@ alloc_fail: void link_destroy(struct dc_link **link) { - destruct(*link); + dc_link_destruct(*link); kfree(*link); *link = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index c8037af93e0a..3fc9752edfe0 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -187,7 +187,7 @@ void dal_ddc_i2c_payloads_add( } -static void construct( +static void ddc_service_construct( struct ddc_service *ddc_service, struct ddc_service_init_data *init_data) { @@ -239,11 +239,11 @@ struct ddc_service *dal_ddc_service_create( if (!ddc_service) return NULL; - construct(ddc_service, init_data); + ddc_service_construct(ddc_service, init_data); return ddc_service; } -static void destruct(struct ddc_service *ddc) +static void ddc_service_destruct(struct ddc_service *ddc) { if (ddc->ddc_pin) dal_gpio_destroy_ddc(&ddc->ddc_pin); @@ -255,7 +255,7 @@ void dal_ddc_service_destroy(struct ddc_service **ddc) BREAK_TO_DEBUGGER(); return; } - destruct(*ddc); + ddc_service_destruct(*ddc); kfree(*ddc); *ddc = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index 5cbfdf1c4b11..a249a0e5edd0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,7 +33,7 @@ * Private functions ******************************************************************************/ -static void destruct(struct dc_sink *sink) +static void dc_sink_destruct(struct dc_sink *sink) { if (sink->dc_container_id) { kfree(sink->dc_container_id); @@ -41,7 +41,7 @@ static void destruct(struct dc_sink *sink) } } -static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) +static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) { struct dc_link *link = init_params->link; @@ -75,7 +75,7 @@ void dc_sink_retain(struct dc_sink *sink) static void dc_sink_free(struct kref *kref) { struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); - destruct(sink); + dc_sink_destruct(sink); kfree(sink); } @@ -91,7 +91,7 @@ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params) if (NULL == sink) goto alloc_fail; - if (false == construct(sink, init_params)) + if (false == dc_sink_construct(sink, init_params)) goto construct_fail; kref_init(&sink->refcount); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 88a84bfaea6f..8d4ffc683204 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -58,7 +58,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) } } -static void construct(struct dc_stream_state *stream, +static void dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; @@ -127,7 +127,7 @@ static void construct(struct dc_stream_state *stream, stream->ctx->dc_stream_id_count++; } -static void destruct(struct dc_stream_state *stream) +static void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); if (stream->out_transfer_func != NULL) { @@ -145,7 +145,7 @@ static void dc_stream_free(struct kref *kref) { struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount); - destruct(stream); + dc_stream_destruct(stream); kfree(stream); } @@ -168,7 +168,7 @@ struct dc_stream_state *dc_create_stream_for_sink( if (stream == NULL) return NULL; - construct(stream, sink); + dc_stream_construct(stream, sink); kref_init(&stream->refcount); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 
aaecdee2b8b4..e60aff46d510 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -37,7 +37,7 @@ /******************************************************************************* * Private functions ******************************************************************************/ -static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state) +static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; @@ -68,7 +68,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state } -static void destruct(struct dc_plane_state *plane_state) +static void dc_plane_destruct(struct dc_plane_state *plane_state) { if (plane_state->gamma_correction != NULL) { dc_gamma_release(&plane_state->gamma_correction); @@ -117,7 +117,7 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc) return NULL; kref_init(&plane_state->refcount); - construct(core_dc->ctx, plane_state); + dc_plane_construct(core_dc->ctx, plane_state); return plane_state; } @@ -187,7 +187,7 @@ void dc_plane_state_retain(struct dc_plane_state *plane_state) static void dc_plane_state_free(struct kref *kref) { struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); - destruct(plane_state); + dc_plane_destruct(plane_state); kvfree(plane_state); } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index a5e122c721ec..8f78bf9abbca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -725,7 +725,7 @@ void dce100_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce100_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -885,7 +885,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce100_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -950,7 +950,7 @@ static const struct resource_funcs dce100_res_pool_funcs = { .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; -static bool construct( +static bool dce100_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1122,7 +1122,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce100_resource_destruct(pool); return false; } @@ -1137,7 +1137,7 @@ struct resource_pool *dce100_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce100_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 83a4dbf6d76e..a535e2cda694 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -782,7 +782,7 @@ void dce110_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce110_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -1161,7 +1161,7 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool) 
{ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce110_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1313,7 +1313,7 @@ const struct resource_caps *dce110_resource_cap( return &carrizo_resource_cap; } -static bool construct( +static bool dce110_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool, @@ -1492,7 +1492,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce110_resource_destruct(pool); return false; } @@ -1507,7 +1507,7 @@ struct resource_pool *dce110_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool, asic_id)) + if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 97dcc5d0862b..700ad8b3e54b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -744,7 +744,7 @@ void dce112_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce112_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -1013,7 +1013,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce112_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1186,7 +1186,7 @@ const struct resource_caps *dce112_resource_cap( return &polaris_10_resource_cap; } -static bool construct( +static bool dce112_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1372,7 +1372,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce112_resource_destruct(pool); return false; } @@ -1386,7 +1386,7 @@ struct resource_pool *dce112_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce112_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 63543f6918ff..305bb0841563 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -587,7 +587,7 @@ static void dce120_transform_destroy(struct transform **xfm) *xfm = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce120_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -872,7 +872,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce120_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1024,7 +1024,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } -static bool construct( +static bool dce120_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1237,7 +1237,7 @@ controller_create_fail: clk_src_create_fail: res_create_fail: - destruct(pool); + dce120_resource_destruct(pool); return false; } @@ -1252,7 +1252,7 @@ struct resource_pool *dce120_create_resource_pool( if (!pool) return NULL; - if 
(construct(num_virtual_links, dc, pool)) + if (dce120_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 3e8d4b49f279..2ad5c28c6e66 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -773,7 +773,7 @@ static struct input_pixel_processor *dce80_ipp_create( return &ipp->base; } -static void destruct(struct dce110_resource_pool *pool) +static void dce80_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -901,7 +901,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce80_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1093,7 +1093,7 @@ static bool dce80_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } @@ -1290,7 +1290,7 @@ static bool dce81_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } @@ -1483,7 +1483,7 @@ static bool dce83_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 15640aedd664..602769e2f4a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -919,7 +919,7 @@ static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) return pp_smu; } -static void destruct(struct dcn10_resource_pool *pool) +static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) { unsigned int i; @@ -1166,7 +1166,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) { struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); - destruct(dcn10_pool); + dcn10_resource_destruct(dcn10_pool); kfree(dcn10_pool); *pool = NULL; } @@ -1305,7 +1305,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } -static bool construct( +static bool dcn10_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn10_resource_pool *pool) @@ -1592,7 +1592,7 @@ static bool construct( fail: - destruct(pool); + dcn10_resource_destruct(pool); return false; } @@ -1607,7 +1607,7 @@ struct resource_pool *dcn10_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index f69b45eeb766..38056e111c61 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1226,7 +1226,7 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc) } -static void destruct(struct dcn20_resource_pool *pool) +static void dcn20_resource_destruct(struct dcn20_resource_pool *pool) { unsigned int i; @@ -2886,7 +2886,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool) { struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); - destruct(dcn20_pool); + dcn20_resource_destruct(dcn20_pool); kfree(dcn20_pool); *pool = NULL; } @@ 
-3342,7 +3342,7 @@ static bool init_soc_bounding_box(struct dc *dc, return true; } -static bool construct( +static bool dcn20_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn20_resource_pool *pool) @@ -3659,7 +3659,7 @@ static bool construct( create_fail: - destruct(pool); + dcn20_resource_destruct(pool); return false; } @@ -3674,7 +3674,7 @@ struct resource_pool *dcn20_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 8e69346ff27c..3e7215a464a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -844,7 +844,7 @@ enum dcn20_clk_src_array_id { DCN20_CLK_SRC_TOTAL_DCN21 }; -static void destruct(struct dcn21_resource_pool *pool) +static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) { unsigned int i; @@ -1146,7 +1146,7 @@ static void dcn21_destroy_resource_pool(struct resource_pool **pool) { struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool); - destruct(dcn21_pool); + dcn21_resource_destruct(dcn21_pool); kfree(dcn21_pool); *pool = NULL; } @@ -1624,7 +1624,7 @@ static struct resource_funcs dcn21_res_pool_funcs = { .update_bw_bounding_box = update_bw_bounding_box }; -static bool construct( +static bool dcn21_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn21_resource_pool *pool) @@ -1876,7 +1876,7 @@ static bool construct( create_fail: - destruct(pool); + dcn21_resource_destruct(pool); return false; } @@ -1891,7 +1891,7 @@ struct resource_pool *dcn21_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index a9aee1332330..1ae153eab31d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -48,18 +48,18 @@ struct gpio; -static void destruct( +static void dal_hw_ddc_destruct( struct hw_ddc *pin) { dal_hw_gpio_destruct(&pin->base); } -static void destroy( +static void dal_hw_ddc_destroy( struct hw_gpio_pin **ptr) { struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr); - destruct(pin); + dal_hw_ddc_destruct(pin); kfree(pin); @@ -207,7 +207,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_ddc_destroy, .open = dal_hw_gpio_open, .get_value = dal_hw_gpio_get_value, .set_value = dal_hw_gpio_set_value, @@ -216,7 +216,7 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( +static void dal_hw_ddc_construct( struct hw_ddc *ddc, enum gpio_id id, uint32_t en, @@ -243,7 +243,7 @@ void dal_hw_ddc_init( return; } - construct(*hw_ddc, id, en, ctx); + dal_hw_ddc_construct(*hw_ddc, id, en, ctx); } struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c index 69b899741f6d..f9e847e6555d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c @@ -46,22 
+46,13 @@ struct gpio; -static void dal_hw_generic_construct( - struct hw_generic *pin, - enum gpio_id id, - uint32_t en, - struct dc_context *ctx) -{ - dal_hw_gpio_construct(&pin->base, id, en, ctx); -} - static void dal_hw_generic_destruct( struct hw_generic *pin) { dal_hw_gpio_destruct(&pin->base); } -static void destroy( +static void dal_hw_generic_destroy( struct hw_gpio_pin **ptr) { struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr); @@ -90,7 +81,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_generic_destroy, .open = dal_hw_gpio_open, .get_value = dal_hw_gpio_get_value, .set_value = dal_hw_gpio_set_value, @@ -99,14 +90,14 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( - struct hw_generic *generic, +static void dal_hw_generic_construct( + struct hw_generic *pin, enum gpio_id id, uint32_t en, struct dc_context *ctx) { - dal_hw_generic_construct(generic, id, en, ctx); - generic->base.base.funcs = &funcs; + dal_hw_gpio_construct(&pin->base, id, en, ctx); + pin->base.base.funcs = &funcs; } void dal_hw_generic_init( @@ -126,7 +117,7 @@ void dal_hw_generic_init( return; } - construct(*hw_generic, id, en, ctx); + dal_hw_generic_construct(*hw_generic, id, en, ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c index 00c9bcf660a3..692f29de7797 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c @@ -46,34 +46,18 @@ struct gpio; -static void dal_hw_hpd_construct( - struct hw_hpd *pin, - enum gpio_id id, - uint32_t en, - struct dc_context *ctx) -{ - dal_hw_gpio_construct(&pin->base, id, en, ctx); -} - static void dal_hw_hpd_destruct( struct hw_hpd *pin) { dal_hw_gpio_destruct(&pin->base); } - -static void destruct( - struct hw_hpd *hpd) -{ - dal_hw_hpd_destruct(hpd); -} - -static void destroy( +static void dal_hw_hpd_destroy( struct hw_gpio_pin **ptr) { struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr); - destruct(hpd); + dal_hw_hpd_destruct(hpd); kfree(hpd); @@ -120,7 +104,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_hpd_destroy, .open = dal_hw_gpio_open, .get_value = get_value, .set_value = dal_hw_gpio_set_value, @@ -129,14 +113,14 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( - struct hw_hpd *hpd, +static void dal_hw_hpd_construct( + struct hw_hpd *pin, enum gpio_id id, uint32_t en, struct dc_context *ctx) { - dal_hw_hpd_construct(hpd, id, en, ctx); - hpd->base.base.funcs = &funcs; + dal_hw_gpio_construct(&pin->base, id, en, ctx); + pin->base.base.funcs = &funcs; } void dal_hw_hpd_init( @@ -156,7 +140,7 @@ void dal_hw_hpd_init( return; } - construct(*hw_hpd, id, en, ctx); + dal_hw_hpd_construct(*hw_hpd, id, en, ctx); } struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 1a581c464345..3d4461a70f7d 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -403,7 +403,7 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct(struct irq_service *irq_service, +static void 
dce110_irq_construct(struct irq_service *irq_service, struct irq_service_init_data *init_data) { dal_irq_service_construct(irq_service, init_data); @@ -421,6 +421,6 @@ dal_irq_service_dce110_create(struct irq_service_init_data *init_data) if (!irq_service) return NULL; - construct(irq_service, init_data); + dce110_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c index 15380336cb51..2fe4703395f3 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c @@ -273,7 +273,7 @@ static const struct irq_service_funcs irq_service_funcs_dce120 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct( +static void dce120_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -292,6 +292,6 @@ struct irq_service *dal_irq_service_dce120_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dce120_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c index 281fee8ad1e5..17e426b80a00 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c @@ -283,7 +283,7 @@ static const struct irq_service_funcs irq_service_funcs_dce80 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct( +static void dce80_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -302,7 +302,7 @@ struct irq_service *dal_irq_service_dce80_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dce80_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index cc8e7dedccce..f956b3bde680 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -355,7 +355,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn10 = { .to_dal_irq_source = to_dal_irq_source_dcn10 }; -static void construct( +static void dcn10_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -374,6 +374,6 @@ struct irq_service *dal_irq_service_dcn10_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn10_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 5db29bf582d3..2a1fea501f8c 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -359,7 +359,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn20 = { .to_dal_irq_source = to_dal_irq_source_dcn20 }; -static void construct( +static void dcn20_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -378,6 +378,6 @@ struct irq_service *dal_irq_service_dcn20_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn20_irq_construct(irq_service, init_data); return irq_service; } diff --git 
a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index cbe7818529bb..1b971265418b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -350,7 +350,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn21 = { .to_dal_irq_source = to_dal_irq_source_dcn21 }; -static void construct( +static void dcn21_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -369,6 +369,6 @@ struct irq_service *dal_irq_service_dcn21_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn21_irq_construct(irq_service, init_data); return irq_service; } -- cgit From 2057b7e1cf77bdf090a3571a8d2ca00a76f34a9e Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 15 Oct 2019 15:12:57 -0400 Subject: drm/amd/display: add color space option when sending link test pattern [why] In the TEST_MISC dpcd register field definition, the test equipment has the option to choose between YCbCr601 and YCbCr709. We will apply the corresponding YCbCr coefficients based on this test request. [how] Add a new input parameter in dc_link_dp_set_test_pattern to allow selecting between different color spaces. Signed-off-by: Wenjing Liu Reviewed-by: Nikola Cornij Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 1 + drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 ++ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 39 ++++++++++++++++++++-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 10 +++--- drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 9 ++++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 16 ++++++++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 7 ++++ drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 1 + .../drm/amd/display/include/link_service_types.h | 7 ++++ 11 files changed, 85 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index bdb37e611015..f81d3439ee8c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -657,6 +657,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us dc_link_set_test_pattern( link, test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_RGB, &link_training_settings, custom_pattern, 10); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index cfb8f9010673..123b79dcd8e4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3319,6 +3319,7 @@ void dc_link_disable_hpd(const struct dc_link *link) void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) @@ -3327,6 +3328,7 @@ void dc_link_set_test_pattern(struct dc_link *link, dc_link_dp_set_test_pattern( link, test_pattern, + test_pattern_color_space, p_link_settings, p_custom_pattern, cust_pattern_size); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b72db01afeed..272261192e82 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2493,6 +2493,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) dc_link_dp_set_test_pattern( link, test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, &link_training_settings, test_80_bit_pattern, (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - @@ -2504,6 +2505,8 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) union link_test_pattern dpcd_test_pattern; union test_misc dpcd_test_params; enum dp_test_pattern test_pattern; + enum dp_test_pattern_color_space test_pattern_color_space = + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern)); memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); @@ -2538,9 +2541,14 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) break; } + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? + DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + dc_link_dp_set_test_pattern( link, test_pattern, + test_pattern_color_space, NULL, NULL, 0); @@ -3426,7 +3434,8 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *pipe_ctx, - enum dp_test_pattern test_pattern) + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space) { enum controller_dp_test_pattern controller_test_pattern; enum dc_color_depth color_depth = pipe_ctx-> @@ -3484,8 +3493,27 @@ static void set_crtc_test_pattern(struct dc_link *link, controller_test_pattern, color_depth); else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; + enum controller_dp_color_space controller_color_space; int opp_cnt = 1; + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709; + break; + case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED: + default: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; + DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); + ASSERT(0); + break; + } + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; @@ -3497,6 +3525,7 @@ static void set_crtc_test_pattern(struct dc_link *link, odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, controller_test_pattern, + controller_color_space, color_depth, NULL, width, @@ -3504,6 +3533,7 @@ static void set_crtc_test_pattern(struct dc_link *link, } opp->funcs->opp_set_disp_pattern_generator(opp, controller_test_pattern, + controller_color_space, color_depth, NULL, width, @@ -3535,6 +3565,7 @@ static void set_crtc_test_pattern(struct dc_link *link, odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, width, @@ -3542,6 +3573,7 @@ static void set_crtc_test_pattern(struct dc_link *link, } opp->funcs->opp_set_disp_pattern_generator(opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + 
CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, width, @@ -3558,6 +3590,7 @@ static void set_crtc_test_pattern(struct dc_link *link, bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) @@ -3586,7 +3619,7 @@ bool dc_link_dp_set_test_pattern( if (link->test_pattern_enabled && test_pattern == DP_TEST_PATTERN_VIDEO_MODE) { /* Set CRTC Test Pattern */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern); + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); dp_set_hw_test_pattern(link, test_pattern, (uint8_t *)p_custom_pattern, (uint32_t)cust_pattern_size); @@ -3701,7 +3734,7 @@ bool dc_link_dp_set_test_pattern( } } else { /* CRTC Patterns */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern); + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); /* Set Test Pattern state */ link->test_pattern_enabled = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 1b68d7c13085..dfe4472c9e40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -522,14 +522,14 @@ union link_test_pattern { union test_misc { struct dpcd_test_misc_bits { - unsigned char SYNC_CLOCK :1; + unsigned char SYNC_CLOCK :1; /* dpcd_test_color_format */ - unsigned char CLR_FORMAT :2; + unsigned char CLR_FORMAT :2; /* dpcd_test_dyn_range */ - unsigned char DYN_RANGE :1; - unsigned char YCBCR :1; + unsigned char DYN_RANGE :1; + unsigned char YCBCR_COEFS :1; /* dpcd_test_bit_depth */ - unsigned char BPC :3; + unsigned char BPC :3; } bits; unsigned char raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 314d2043cd78..1ff79f703734 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -257,6 +257,7 @@ void dc_link_dp_disable_hpd(const struct dc_link *link); bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); @@ -288,6 +289,7 @@ void dc_link_enable_hpd(const struct dc_link *link); void dc_link_disable_hpd(const struct dc_link *link); void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0046a099c9e9..4d36b9e415f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -223,6 +223,7 @@ void dcn20_init_blank( opp->funcs->opp_set_disp_pattern_generator( opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, @@ -232,6 +233,7 @@ void dcn20_init_blank( bottom_opp->funcs->opp_set_disp_pattern_generator( bottom_opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, 
otg_active_width, @@ -851,6 +853,7 @@ void dcn20_blank_pixel_data( struct dc_stream_state *stream = pipe_ctx->stream; enum dc_color_space color_space = stream->output_color_space; enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; + enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; struct pipe_ctx *odm_pipe; int odm_cnt = 1; @@ -869,8 +872,10 @@ void dcn20_blank_pixel_data( if (stream_res->abm) stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm); - if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) + if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + } } else { test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; } @@ -878,6 +883,7 @@ void dcn20_blank_pixel_data( stream_res->opp->funcs->opp_set_disp_pattern_generator( stream_res->opp, test_pattern, + test_pattern_color_space, stream->timing.display_color_depth, &black_color, width, @@ -888,6 +894,7 @@ void dcn20_blank_pixel_data( odm_pipe->stream_res.opp, dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, + test_pattern_color_space, stream->timing.display_color_depth, &black_color, width, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 40164ed015ea..023cc71fad0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -41,6 +41,7 @@ void opp2_set_disp_pattern_generator( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, @@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator( TEST_PATTERN_DYN_RANGE_CEA : TEST_PATTERN_DYN_RANGE_VESA); + switch (color_space) { + case CONTROLLER_DP_COLOR_SPACE_YCBCR601: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601; + break; + case CONTROLLER_DP_COLOR_SPACE_YCBCR709: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709; + break; + case CONTROLLER_DP_COLOR_SPACE_RGB: + default: + mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; + break; + } + REG_UPDATE_6(DPG_CONTROL, DPG_EN, 1, - DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB, + DPG_MODE, mode, DPG_DYNAMIC_RANGE, dyn_range, DPG_BIT_DEPTH, bit_depth, DPG_VRES, 6, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index abd8de9a78f8..4093bec172c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20, void opp2_set_disp_pattern_generator( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 99ae8181d429..75d419081e76 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -245,6 +245,13 @@ enum controller_dp_test_pattern { CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR }; +enum controller_dp_color_space { + CONTROLLER_DP_COLOR_SPACE_RGB, + CONTROLLER_DP_COLOR_SPACE_YCBCR601, + 
CONTROLLER_DP_COLOR_SPACE_YCBCR709, + CONTROLLER_DP_COLOR_SPACE_UDEFINED +}; + enum dc_lut_mode { LUT_BYPASS, LUT_RAM_A, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index e2d960e5fc1c..7575564b2265 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -306,6 +306,7 @@ struct opp_funcs { void (*opp_set_disp_pattern_generator)( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 876b0b3e1a9c..4869d4562e4d 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -123,6 +123,13 @@ enum dp_test_pattern { DP_TEST_PATTERN_UNSUPPORTED }; +enum dp_test_pattern_color_space { + DP_TEST_PATTERN_COLOR_SPACE_RGB, + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601, + DP_TEST_PATTERN_COLOR_SPACE_YCBCR709, + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED +}; + enum dp_panel_mode { /* not required */ DP_PANEL_MODE_DEFAULT, -- cgit From ad4e140e9bccc4a5a73651bfce913d7a3edaf0bb Mon Sep 17 00:00:00 2001 From: Joshua Aberback Date: Fri, 1 Nov 2019 17:29:20 -0400 Subject: drm/amd/display: Adjust DML workaround threshold [Why] There is a case where the margin is between 50 and 60, but applying the workaround causes a hang. By increasing the threshold, we are blocking more cases from switching p-state during active, but those cases will fall back to switching during blank, which is fine. [How] - increase required margin from 50 to 60 Signed-off-by: Joshua Aberback Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 2 +- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 77b7574c63cb..3b224b155e8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -2578,7 +2578,7 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer + mode_lib->vba.DRAMClockChangeLatency; if (mode_lib->vba.DRAMClockChangeSupportsVActive && - mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 62dfd36d830a..6482d7b99bae 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -2612,7 +2612,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP + mode_lib->vba.DRAMClockChangeLatency; if (mode_lib->vba.DRAMClockChangeSupportsVActive && - mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { 
mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else if (mode_lib->vba.DummyPStateCheck && -- cgit From b9e9f11c9145a2f5ffb50adf450c649fadd54e02 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Sat, 26 Oct 2019 10:19:40 -0400 Subject: drm/amd/display: Add debug trace for dmcub FW autoload. [Why & How] 1. Add trace code enum for easy debugging. 2. Add trace during uC boot up, including loading phy FW and dmcu FW. 3. Change cache memory type back to write back, since write through has issue when resume from S0i3 100% hang after 3.2ms. 4. Change CW3 base address to hard code value to avoid memory overlap with cw1. 5. Change polling phy init done to infinite loop to avoid dcn hang when dmcub uC stalled. 6. Add dmcub FW dis-assembly file to repositatory for debug purpose. Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../drm/amd/display/dmub/inc/dmub_trace_buffer.h | 21 +++++++++++++++++++-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 2 +- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 20 +++++++++++++------- 3 files changed, 33 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h index 9707706ba8ce..b0ee099d8a6e 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h @@ -30,8 +30,25 @@ #define LOAD_DMCU_FW 1 #define LOAD_PHY_FW 2 + +enum dmucb_trace_code { + DMCUB__UNKNOWN, + DMCUB__MAIN_BEGIN, + DMCUB__PHY_INIT_BEGIN, + DMCUB__PHY_FW_SRAM_LOAD_BEGIN, + DMCUB__PHY_FW_SRAM_LOAD_END, + DMCUB__PHY_INIT_POLL_DONE, + DMCUB__PHY_INIT_END, + DMCUB__DMCU_ERAM_LOAD_BEGIN, + DMCUB__DMCU_ERAM_LOAD_END, + DMCUB__DMCU_ISR_LOAD_BEGIN, + DMCUB__DMCU_ISR_LOAD_END, + DMCUB__MAIN_IDLE, + DMCUB__PERF_TRACE, +}; + struct dmcub_trace_buf_entry { - uint32_t trace_code; + enum dmucb_trace_code trace_code; uint32_t tick_count; uint32_t param0; uint32_t param1; @@ -40,6 +57,7 @@ struct dmcub_trace_buf_entry { #define TRACE_BUF_SIZE (1024) //1 kB #define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry)) + struct dmcub_trace_buf { uint32_t entry_count; uint32_t clk_freq; @@ -47,5 +65,4 @@ struct dmcub_trace_buf { }; - #endif /* _DMUB_TRACE_BUFFER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 89fd27758dd5..e2b2cf2e01fd 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -138,5 +138,5 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub) bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub) { - return REG_READ(DMCUB_SCRATCH10) != 0; + return REG_READ(DMCUB_SCRATCH10) == 0; } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 0dd32edbbcb3..5ae1906ff1b1 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -26,6 +26,8 @@ #include "../inc/dmub_srv.h" #include "dmub_dcn20.h" #include "dmub_dcn21.h" +#include "dmub_trace_buffer.h" +#include "os_types.h" /* * Note: the DMUB service is standalone. 
No additional headers should be * added below or above this line unless they reside within the DMUB @@ -44,8 +46,6 @@ /* Mailbox size */ #define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) -/* Tracebuffer size */ -#define DMUB_TRACEBUFF_SIZE (1024) //1kB buffer /* Number of windows in use. */ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_5_TRACEBUFF + 1) @@ -53,6 +53,7 @@ #define DMUB_CW0_BASE (0x60000000) #define DMUB_CW1_BASE (0x61000000) +#define DMUB_CW3_BASE (0x63000000) #define DMUB_CW5_BASE (0x65000000) static inline uint32_t dmub_align(uint32_t val, uint32_t factor) @@ -181,7 +182,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, mail->top = mail->base + DMUB_MAILBOX_SIZE; trace_buff->base = dmub_align(mail->top, 256); - trace_buff->top = trace_buff->base + DMUB_TRACEBUFF_SIZE; + trace_buff->top = trace_buff->base + TRACE_BUF_SIZE; out->fb_size = dmub_align(trace_buff->top, 4096); @@ -291,7 +292,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, cw2.region.top = cw2.region.base + data_fb->size; cw3.offset.quad_part = bios_fb->gpu_addr; - cw3.region.base = DMUB_CW1_BASE + stack_fb->size; + cw3.region.base = DMUB_CW3_BASE; cw3.region.top = cw3.region.base + bios_fb->size; cw4.offset.quad_part = mail_fb->gpu_addr; @@ -394,19 +395,24 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, uint32_t timeout_us) { - uint32_t i; + uint32_t i = 0; if (!dmub->hw_init || !dmub->hw_funcs.is_phy_init) return DMUB_STATUS_INVALID; - for (i = 0; i <= timeout_us; i += 10) { +/* for (i = 0; i <= timeout_us; i += 10) { if (dmub->hw_funcs.is_phy_init(dmub)) return DMUB_STATUS_OK; udelay(10); + }*/ + while (!dmub->hw_funcs.is_phy_init(dmub)) { + ASSERT(i <= timeout_us); + i += 10; + udelay(10); } - return DMUB_STATUS_TIMEOUT; + return DMUB_STATUS_OK; } enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, -- cgit From 53db058581b2f19e9aaf15762592c31be9c780f2 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 4 Nov 2019 08:31:14 -0500 Subject: drm/amd/display: 3.2.60 Signed-off-by: Aric Cyr Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4c6c2fcc6a96..da9cb7dd22e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.59" +#define DC_VER "3.2.60" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From 9185e8adb4cc9aa628a2c9b47b43d193cf268ee5 Mon Sep 17 00:00:00 2001 From: "David (Dingchen) Zhang" Date: Thu, 31 Oct 2019 14:36:51 -0400 Subject: drm/amd/display: add debugfs sdp hook up function for Navi [why] need to send immediate SDP message via debugfs on Navi board. [how] hook up the DCN1x encoder function of sending immediate sdp message to DCN2. 
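The change described in this commit is purely a function-pointer hookup: DCN2.0 does not add a new implementation, it points the send_immediate_sdp_message entry of its stream_encoder_funcs table at the existing DCN1.x handler, as the diff below shows. The following is a minimal, self-contained C sketch of that reuse pattern only; the names used here (sdp_encoder, sdp_encoder_funcs, enc1_send_immediate_sdp_message, enc2_funcs) are simplified stand-ins for illustration, not the actual dc definitions.

#include <stdio.h>

/* Simplified stand-ins for the dc stream encoder types (illustrative only). */
struct sdp_encoder {
	int inst;
};

struct sdp_encoder_funcs {
	void (*send_immediate_sdp_message)(struct sdp_encoder *enc,
					   const unsigned char *msg,
					   unsigned int size);
};

/* The older generation already provides a working handler... */
static void enc1_send_immediate_sdp_message(struct sdp_encoder *enc,
					    const unsigned char *msg,
					    unsigned int size)
{
	printf("enc%d: sending %u byte immediate SDP message\n",
	       enc->inst, size);
}

/* ...so the newer generation's table simply reuses it instead of
 * duplicating the code. */
static const struct sdp_encoder_funcs enc2_funcs = {
	.send_immediate_sdp_message = enc1_send_immediate_sdp_message,
};

int main(void)
{
	struct sdp_encoder enc = { .inst = 0 };
	unsigned char msg[4] = { 0x80, 0x00, 0x00, 0x00 };

	enc2_funcs.send_immediate_sdp_message(&enc, msg, sizeof(msg));
	return 0;
}
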
Signed-off-by: David (Dingchen) Zhang Reviewed-by: Harry Wentland Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 33cc40fb9687..be0978401476 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -561,6 +561,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc2_stream_encoder_stop_hdmi_info_packets, .update_dp_info_packets = enc2_stream_encoder_update_dp_info_packets, + .send_immediate_sdp_message = + enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, .dp_blank = -- cgit From 46250a0cba4ce9c9754b922ef89a2aa484209164 Mon Sep 17 00:00:00 2001 From: Michael Strauss Date: Sun, 3 Nov 2019 09:35:03 -0500 Subject: drm/amd/display: Avoid conflict between HDR multiplier and 3dlut [WHY] There can be a conflict between OS HDR multiplier and 3dlut HDR multiplier, which are both sent to DC. [HOW] Instead of having dc determine which HDR multiplier to use, make the decision in dm and send only the intended value in a surface update. Store the current OS HDR multiplier and determine whether to use it or the 3dlut's multiplier before sending the surface update to dc. Send multiplier to dc in fixed31_32 format, dc then converts it to hw format. Signed-off-by: Michael Strauss Reviewed-by: Krunoslav Kovac Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 17 ++++++++++------- drivers/gpu/drm/amd/display/dc/dc.h | 9 ++++----- .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 10 +++++++--- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 10 +--------- 4 files changed, 22 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6089c90ad6cd..a25172ba0782 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1469,11 +1469,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa elevate_update_type(&update_type, UPDATE_TYPE_MED); } - if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) { - update_flags->bits.sdr_white_level = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); - } - if (u->plane_info->dcc.enable != u->surface->dcc.enable || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { @@ -1620,6 +1615,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc, update_flags->bits.gamma_change = 1; } + if (u->hdr_mult.value) + if (u->hdr_mult.value != u->surface->hdr_mult.value) { + update_flags->bits.hdr_mult = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_MED); + } + if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); @@ -1801,8 +1802,6 @@ static void copy_surface_update_to_plane( srf_update->plane_info->global_alpha_value; surface->dcc = srf_update->plane_info->dcc; - surface->sdr_white_level = - srf_update->plane_info->sdr_white_level; surface->layer_index = srf_update->plane_info->layer_index; } @@ -1847,6 
+1846,10 @@ static void copy_surface_update_to_plane( memcpy(surface->lut3d_func, srf_update->lut3d_func, sizeof(*surface->lut3d_func)); + if (srf_update->hdr_mult.value) + surface->hdr_mult = + srf_update->hdr_mult; + if (srf_update->blend_tf && (surface->blend_tf != srf_update->blend_tf)) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index da9cb7dd22e6..3cb361917b4b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -667,7 +667,7 @@ union dc_3dlut_state { struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; - uint32_t hdr_multiplier; + struct fixed31_32 hdr_multiplier; bool initialized; /*remove after diag fix*/ union dc_3dlut_state state; struct dc_context *ctx; @@ -694,7 +694,7 @@ union surface_update_flags { uint32_t horizontal_mirror_change:1; uint32_t per_pixel_alpha_change:1; uint32_t global_alpha_change:1; - uint32_t sdr_white_level:1; + uint32_t hdr_mult:1; uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; @@ -738,7 +738,7 @@ struct dc_plane_state { struct dc_bias_and_scale *bias_and_scale; struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; - uint32_t sdr_white_level; + struct fixed31_32 hdr_mult; // TODO: No longer used, remove struct dc_hdr_static_metadata hdr_static_ctx; @@ -783,7 +783,6 @@ struct dc_plane_info { enum dc_rotation_angle rotation; enum plane_stereo_format stereo_format; enum dc_color_space color_space; - unsigned int sdr_white_level; bool horizontal_mirror; bool visible; bool per_pixel_alpha; @@ -807,7 +806,7 @@ struct dc_surface_update { const struct dc_flip_addrs *flip_addr; const struct dc_plane_info *plane_info; const struct dc_scaling_info *scaling_info; - + struct fixed31_32 hdr_mult; /* following updates require alloc/sleep/spin that is not isr safe, * null means no updates */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index adba767ccf2e..f21a385a936f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2457,16 +2457,20 @@ static void dcn10_blank_pixel_data( void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) { - struct fixed31_32 multiplier = dc_fixpt_from_fraction( - pipe_ctx->plane_state->sdr_white_level, 80); + struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; uint32_t hw_mult = 0x1f000; // 1.0 default multiplier struct custom_float_format fmt; + bool mult_negative; // True if fixed31_32 sign bit indicates negative value + uint32_t mult_int; // int component of fixed31_32 fmt.exponenta_bits = 6; fmt.mantissa_bits = 12; fmt.sign = true; - if (pipe_ctx->plane_state->sdr_white_level > 80) + mult_negative = multiplier.value >> 63 != 0; + mult_int = multiplier.value >> 32; + + if (mult_int && !mult_negative) // Check if greater than 1 convert_to_custom_float_format(multiplier, &fmt, &hw_mult); pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4d36b9e415f1..868099fbe8ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -734,14 +734,6 @@ bool dcn20_set_shaper_3dlut( else result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); - if (plane_state->lut3d_func && - 
plane_state->lut3d_func->state.bits.initialized == 1 && - plane_state->lut3d_func->hdr_multiplier != 0) - dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, - plane_state->lut3d_func->hdr_multiplier); - else - dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000); - return result; } @@ -1382,7 +1374,7 @@ static void dcn20_program_pipe( dcn20_update_dchubp_dpp(dc, pipe_ctx, context); if (pipe_ctx->update_flags.bits.enable - || pipe_ctx->plane_state->update_flags.bits.sdr_white_level) + || pipe_ctx->plane_state->update_flags.bits.hdr_mult) set_hdr_multiplier(pipe_ctx); if (pipe_ctx->update_flags.bits.enable || -- cgit From 56fc13fe6caa67e1017e048763c9eaa32330187b Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 4 Nov 2019 13:32:46 -0500 Subject: drm/amd/display: Don't spin forever waiting for DMCUB phy/auto init [Why] It's an interface violation to use infinite loops within DMUB service functions and we'll lock up the kernel by doing so. [How] Revert the function back to its intended functionality. Move the infinite loops into DC/DM as necessary. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Sun peng Li Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 6 ++++-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 9 ++------- 2 files changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index b65b66025267..8d348a5f5599 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -112,8 +112,10 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status; - status = dmub_srv_wait_for_phy_init(dmub, 1000000); - if (status != DMUB_STATUS_OK) + status = dmub_srv_wait_for_phy_init(dmub, 10000000); + if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for DMUB phy init: status=%d\n", status); + ASSERT(0); + } } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 5ae1906ff1b1..60c574a39c6a 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -400,19 +400,14 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, if (!dmub->hw_init || !dmub->hw_funcs.is_phy_init) return DMUB_STATUS_INVALID; -/* for (i = 0; i <= timeout_us; i += 10) { + for (i = 0; i <= timeout_us; i += 10) { if (dmub->hw_funcs.is_phy_init(dmub)) return DMUB_STATUS_OK; udelay(10); - }*/ - while (!dmub->hw_funcs.is_phy_init(dmub)) { - ASSERT(i <= timeout_us); - i += 10; - udelay(10); } - return DMUB_STATUS_OK; + return DMUB_STATUS_TIMEOUT; } enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, -- cgit From 78c7738211e027f122ada3f59c2c5e8a83ee3c59 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 29 Oct 2019 15:05:56 -0400 Subject: drm/amd/display: cleanup of function pointer tables [Why] It is becoming increasingly hard to figure out which function is called on the different DCN versions [How] 1. Make function pointer table init in its own init.c file 2. Remove other scenarios in hwseq.c file that need to include headers of other DCN versions. 
(If needed, it should have been done via the function pointers) Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/basics/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/basics/dc_common.c | 101 +++++ drivers/gpu/drm/amd/display/dc/basics/dc_common.h | 42 ++ drivers/gpu/drm/amd/display/dc/core/dc.c | 4 +- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3 +- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 12 +- .../amd/display/dc/dce110/dce110_hw_sequencer.h | 1 - drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 3 +- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 436 ++++++--------------- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 181 ++++++--- .../display/dc/dcn10/dcn10_hw_sequencer_debug.h | 43 ++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 105 +++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h | 33 ++ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 + drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 426 ++++++++------------ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 150 ++++--- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 127 ++++++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h | 33 ++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 12 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 2 +- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 4 +- drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 3 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 13 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h | 14 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 131 +++++++ drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h | 33 ++ .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 + drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 40 +- 29 files changed, 1243 insertions(+), 717 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/basics/dc_common.c create mode 100644 drivers/gpu/drm/amd/display/dc/basics/dc_common.h create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile index a50a76471107..7ad0cad0f4ef 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/Makefile +++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile @@ -25,7 +25,7 @@ # subcomponents. BASICS = conversion.o fixpt31_32.o \ - log_helpers.o vector.o + log_helpers.o vector.o dc_common.o AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c new file mode 100644 index 000000000000..b2fc4f8e6482 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c @@ -0,0 +1,101 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dc_common.h" +#include "basics/conversion.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space) +{ + switch (output_color_space) { + case COLOR_SPACE_SRGB: + case COLOR_SPACE_SRGB_LIMITED: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_ADOBERGB: + return true; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR601_LIMITED: + case COLOR_SPACE_YCBCR709_LIMITED: + case COLOR_SPACE_2020_YCBCR: + return false; + default: + /* Add a case to switch */ + BREAK_TO_DEBUGGER(); + return false; + } +} + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) + return true; + return false; +} + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) + return true; + return false; +} + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) + return true; + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) + return true; + return false; +} + +void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, + const struct dc_plane_state *plane_state) +{ + if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN + && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID + && plane_state->input_csc_color_matrix.enable_adjustment + && plane_state->coeff_reduction_factor.value != 0) { + bias_and_scale->scale_blue = fixed_point_to_int_frac( + dc_fixpt_mul(plane_state->coeff_reduction_factor, + dc_fixpt_from_fraction(256, 255)), + 2, + 13); + bias_and_scale->scale_red = bias_and_scale->scale_blue; + bias_and_scale->scale_green = bias_and_scale->scale_blue; + } else { + bias_and_scale->scale_blue = 0x2000; + bias_and_scale->scale_red = 0x2000; + bias_and_scale->scale_green = 0x2000; + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h new file mode 100644 index 000000000000..7c0cbf47e8ce --- 
/dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h @@ -0,0 +1,42 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DC_COMMON_H__ +#define __DAL_DC_COMMON_H__ + +#include "core_types.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space); + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, + const struct dc_plane_state *plane_state); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index a25172ba0782..1395aff39980 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1973,11 +1973,11 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->periodic_interrupt0 && dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0); + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0); if (stream_update->periodic_interrupt1 && dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1); + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1); if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 8d4ffc683204..d9afd834c146 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -27,6 +27,7 @@ #include #include "dm_services.h" +#include "basics/dc_common.h" #include "dc.h" #include "core_types.h" #include "resource.h" @@ -244,7 +245,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) if (stream->ctx->asic_id.chip_family == FAMILY_RV && ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { - vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); + vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) return; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 3f5fbad587e7..1dc065f1125c 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -61,6 +61,8 @@ #include "atomfirmware.h" +#define GAMMA_HW_POINTS_NUM 256 + /* * All values are in milliseconds; * For eDP, after power-up/power/down, @@ -268,7 +270,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params, } static bool -dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; @@ -596,7 +598,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, } static bool -dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) { struct transform *xfm = pipe_ctx->plane_res.xfm; @@ -1358,7 +1360,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( dc->hwss.enable_stream_timing(pipe_ctx, context, dc); if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -2501,10 +2503,10 @@ static void dce110_program_front_end_for_pipe( if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + dc->hwss.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + dc->hwss.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); DC_LOG_SURFACE( "Pipe:%d %p: addr hi:0x%x, " diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 2f9b7dbdf415..c639e1680b7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -28,7 +28,6 @@ #include "core_types.h" -#define GAMMA_HW_POINTS_NUM 256 struct dc; struct dc_state; struct dm_pp_display_configuration; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 032f872be89c..62ad1a11bff9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -22,7 +22,8 @@ # # Makefile for DCN. 
-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ +DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ + dcn10_hw_sequencer_debug.o \ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index f21a385a936f..251bb59c271a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -25,17 +25,18 @@ #include #include "dm_services.h" +#include "basics/dc_common.h" #include "core_types.h" #include "resource.h" #include "custom_float.h" #include "dcn10_hw_sequencer.h" -#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer_debug.h" #include "dce/dce_hwseq.h" #include "abm.h" #include "dmcu.h" #include "dcn10_optc.h" -#include "dcn10/dcn10_dpp.h" -#include "dcn10/dcn10_mpc.h" +#include "dcn10_dpp.h" +#include "dcn10_mpc.h" #include "timing_generator.h" #include "opp.h" #include "ipp.h" @@ -66,6 +67,8 @@ #define DTN_INFO_MICRO_SEC(ref_cycle) \ print_microsec(dc_ctx, log_ctx, ref_cycle) +#define GAMMA_HW_POINTS_NUM 256 + void print_microsec(struct dc_context *dc_ctx, struct dc_log_buffer_ctx *log_ctx, uint32_t ref_cycle) @@ -79,6 +82,33 @@ void print_microsec(struct dc_context *dc_ctx, us_x10 % frac); } +static void dcn10_lock_all_pipes(struct dc *dc, + struct dc_state *context, + bool lock) +{ + struct pipe_ctx *pipe_ctx; + struct timing_generator *tg; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &context->res_ctx.pipe_ctx[i]; + tg = pipe_ctx->stream_res.tg; + /* + * Only lock the top pipe's tg to prevent redundant + * (un)locking. Also skip if pipe is disabled. 
+ */ + if (pipe_ctx->top_pipe || + !pipe_ctx->stream || !pipe_ctx->plane_state || + !tg->funcs->is_tg_enabled(tg)) + continue; + + if (lock) + tg->funcs->lock(tg); + else + tg->funcs->unlock(tg); + } +} + static void log_mpc_crc(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) { @@ -445,7 +475,7 @@ bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx) return false; } -static void dcn10_enable_power_gating_plane( +void dcn10_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { @@ -467,7 +497,7 @@ static void dcn10_enable_power_gating_plane( REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on); } -static void dcn10_disable_vga( +void dcn10_disable_vga( struct dce_hwseq *hws) { unsigned int in_vga1_mode = 0; @@ -500,7 +530,7 @@ static void dcn10_disable_vga( REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1); } -static void dcn10_dpp_pg_control( +void dcn10_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) @@ -552,7 +582,7 @@ static void dcn10_dpp_pg_control( } } -static void dcn10_hubp_pg_control( +void dcn10_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -671,7 +701,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc) hws->wa_state.DEGVIDCN10_253_applied = true; } -static void dcn10_bios_golden_init(struct dc *dc) +void dcn10_bios_golden_init(struct dc *dc) { struct dc_bios *bp = dc->ctx->dc_bios; int i; @@ -737,7 +767,7 @@ static void false_optc_underflow_wa( tg->funcs->clear_optc_underflow(tg); } -static enum dc_status dcn10_enable_stream_timing( +enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -983,7 +1013,7 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; int dpp_id = pipe_ctx->plane_res.dpp->inst; @@ -1009,10 +1039,10 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_disconnect(hubp); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } -static void dcn10_plane_atomic_power_down(struct dc *dc, +void dcn10_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp) { @@ -1035,7 +1065,7 @@ static void dcn10_plane_atomic_power_down(struct dc *dc, /* disable HW used by plane. 
* note: cannot disable until disconnect is complete */ -static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -1067,7 +1097,7 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) { DC_LOGGER_INIT(dc->ctx->logger); @@ -1082,7 +1112,7 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->pipe_idx); } -static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) +void dcn10_init_pipes(struct dc *dc, struct dc_state *context) { int i; bool can_apply_seamless_boot = false; @@ -1181,7 +1211,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) } } -static void dcn10_init_hw(struct dc *dc) +void dcn10_init_hw(struct dc *dc) { int i; struct abm *abm = dc->res_pool->abm; @@ -1313,7 +1343,7 @@ static void dcn10_init_hw(struct dc *dc) } -static void dcn10_reset_hw_ctx_wrap( +void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { @@ -1370,9 +1400,7 @@ static bool patch_address_for_sbs_tb_stereo( return false; } - - -static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { bool addr_patched = false; PHYSICAL_ADDRESS_LOC addr; @@ -1397,8 +1425,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; } -static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state) +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; const struct dc_transfer_func *tf = NULL; @@ -1475,9 +1503,8 @@ static void log_tf(struct dc_context *ctx, } } -static bool -dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream) +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream) { struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -1513,7 +1540,7 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static void dcn10_pipe_control_lock( +void dcn10_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, bool lock) @@ -1525,7 +1552,7 @@ static void dcn10_pipe_control_lock( return; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); if (lock) pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); @@ -1533,7 +1560,7 @@ static void dcn10_pipe_control_lock( pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } static bool wait_for_reset_trigger_to_occur( @@ -1573,7 +1600,7 @@ static bool wait_for_reset_trigger_to_occur( return rc; } -static void dcn10_enable_timing_synchronization( +void dcn10_enable_timing_synchronization( struct dc *dc, int group_index, int group_size, @@ -1603,7 +1630,7 @@ static void dcn10_enable_timing_synchronization( DC_SYNC_INFO("Sync complete\n"); } -static void 
dcn10_enable_per_frame_crtc_position_reset( +void dcn10_enable_per_frame_crtc_position_reset( struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -1841,7 +1868,7 @@ static void dcn10_enable_plane( struct dce_hwseq *hws = dc->hwseq; if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } undo_DEGVIDCN10_253_wa(dc); @@ -1898,11 +1925,11 @@ static void dcn10_enable_plane( dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp); if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } } -static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) { int i = 0; struct dpp_grph_csc_adjustment adjust; @@ -1950,7 +1977,7 @@ static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint matrix[11] = rgb_bias; } -static void dcn10_program_output_csc(struct dc *dc, +void dcn10_program_output_csc(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, uint16_t *matrix, @@ -1982,57 +2009,6 @@ static void dcn10_program_output_csc(struct dc *dc, } } -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - return false; -} - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - return false; -} - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - return false; -} - -bool is_rgb_cspace(enum dc_color_space output_color_space) -{ - switch (output_color_space) { - case COLOR_SPACE_SRGB: - case COLOR_SPACE_SRGB_LIMITED: - case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_RGB_LIMITEDRANGE: - case COLOR_SPACE_ADOBERGB: - return true; - case COLOR_SPACE_YCBCR601: - case COLOR_SPACE_YCBCR709: - case COLOR_SPACE_YCBCR601_LIMITED: - case COLOR_SPACE_YCBCR709_LIMITED: - case COLOR_SPACE_2020_YCBCR: - return false; - default: - /* Add a case to switch */ - BREAK_TO_DEBUGGER(); - return false; - } -} - void dcn10_get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color) @@ -2106,70 +2082,7 @@ void dcn10_get_hdr_visual_confirm_color( } } -static uint16_t fixed_point_to_int_frac( - struct fixed31_32 arg, - uint8_t integer_bits, - uint8_t fractional_bits) -{ - int32_t numerator; - int32_t divisor = 1 << fractional_bits; - - uint16_t result; - - uint16_t d = (uint16_t)dc_fixpt_floor( - dc_fixpt_abs( - arg)); - - if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor)) - numerator = (uint16_t)dc_fixpt_floor( - dc_fixpt_mul_int( - arg, - divisor)); - else { - numerator = dc_fixpt_floor( - dc_fixpt_sub( - dc_fixpt_from_int( - 1LL << integer_bits), - dc_fixpt_recip( - dc_fixpt_from_int( - divisor)))); - } - - if (numerator >= 0) - result = (uint16_t)numerator; - else - result = (uint16_t)( - (1 << (integer_bits + fractional_bits + 1)) + numerator); - - if ((result != 0) && dc_fixpt_lt( - arg, 
dc_fixpt_zero)) - result |= 1 << (integer_bits + fractional_bits); - - return result; -} - -void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale, - const struct dc_plane_state *plane_state) -{ - if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN - && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID - && plane_state->input_csc_color_matrix.enable_adjustment - && plane_state->coeff_reduction_factor.value != 0) { - bias_and_scale->scale_blue = fixed_point_to_int_frac( - dc_fixpt_mul(plane_state->coeff_reduction_factor, - dc_fixpt_from_fraction(256, 255)), - 2, - 13); - bias_and_scale->scale_red = bias_and_scale->scale_blue; - bias_and_scale->scale_green = bias_and_scale->scale_blue; - } else { - bias_and_scale->scale_blue = 0x2000; - bias_and_scale->scale_red = 0x2000; - bias_and_scale->scale_green = 0x2000; - } -} - -static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) +static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) { struct dc_bias_and_scale bns_params = {0}; @@ -2182,12 +2095,12 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) NULL); //set scale and bias registers - dcn10_build_prescale_params(&bns_params, plane_state); + build_prescale_params(&bns_params, plane_state); if (dpp->funcs->dpp_program_bias_and_scale) dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = {{0}}; @@ -2198,10 +2111,10 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dcn10_get_hdr_visual_confirm_color( + dc->hwss.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dcn10_get_surface_visual_confirm_color( + dc->hwss.get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else { color_space_to_black_color( @@ -2283,7 +2196,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); } -void update_dchubp_dpp( +static void dcn10_update_dchubp_dpp( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) @@ -2341,7 +2254,7 @@ void update_dchubp_dpp( if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.bpp_change) - update_dpp(dpp, plane_state); + dcn10_update_dpp(dpp, plane_state); if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || @@ -2412,7 +2325,7 @@ void update_dchubp_dpp( hubp->funcs->set_blank(hubp, false); } -static void dcn10_blank_pixel_data( +void dcn10_blank_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank) @@ -2455,7 +2368,7 @@ static void dcn10_blank_pixel_data( } } -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx) { struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; uint32_t hw_mult = 0x1f000; // 1.0 default multiplier @@ -2485,14 +2398,14 @@ void dcn10_program_pipe( if (pipe_ctx->plane_state->update_flags.bits.full_update) dcn10_enable_plane(dc, pipe_ctx, context); - update_dchubp_dpp(dc, pipe_ctx, context); + dcn10_update_dchubp_dpp(dc, pipe_ctx, context); - 
set_hdr_multiplier(pipe_ctx); + dc->hwss.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + dc->hwss.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish * only do gamma programming for full update. @@ -2501,10 +2414,10 @@ void dcn10_program_pipe( * doing heavy calculation and programming */ if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + dc->hwss.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); } -static void program_all_pipe_in_tree( +static void dcn10_program_all_pipe_in_tree( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) @@ -2523,19 +2436,19 @@ static void program_all_pipe_in_tree( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); } if (pipe_ctx->plane_state != NULL) - dcn10_program_pipe(dc, pipe_ctx, context); + dc->hwss.program_pipe(dc, pipe_ctx, context); if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) - program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); + dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); } -struct pipe_ctx *find_top_pipe_for_stream( +static struct pipe_ctx *dcn10_find_top_pipe_for_stream( struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream) @@ -2559,7 +2472,7 @@ struct pipe_ctx *find_top_pipe_for_stream( return NULL; } -static void dcn10_apply_ctx_for_surface( +void dcn10_apply_ctx_for_surface( struct dc *dc, const struct dc_stream_state *stream, int num_planes, @@ -2571,7 +2484,7 @@ static void dcn10_apply_ctx_for_surface( bool removed_pipe[4] = { false }; bool interdependent_update = false; struct pipe_ctx *top_pipe_to_program = - find_top_pipe_for_stream(dc, context, stream); + dcn10_find_top_pipe_for_stream(dc, context, stream); DC_LOGGER_INIT(dc->ctx->logger); if (!top_pipe_to_program) @@ -2588,7 +2501,7 @@ static void dcn10_apply_ctx_for_surface( ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); if (interdependent_update) - lock_all_pipes(dc, context, true); + dcn10_lock_all_pipes(dc, context, true); else dcn10_pipe_control_lock(dc, top_pipe_to_program, true); @@ -2635,7 +2548,7 @@ static void dcn10_apply_ctx_for_surface( } if (num_planes > 0) - program_all_pipe_in_tree(dc, top_pipe_to_program, context); + dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context); /* Program secondary blending tree and writeback pipes */ if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) @@ -2655,7 +2568,7 @@ static void dcn10_apply_ctx_for_surface( } if (interdependent_update) - lock_all_pipes(dc, context, false); + dcn10_lock_all_pipes(dc, context, false); else dcn10_pipe_control_lock(dc, top_pipe_to_program, false); @@ -2692,14 +2605,14 @@ static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *contex } } -static void dcn10_prepare_bandwidth( +void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context) { struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + 
dc->hwss.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2721,17 +2634,17 @@ static void dcn10_prepare_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } -static void dcn10_optimize_bandwidth( +void dcn10_optimize_bandwidth( struct dc *dc, struct dc_state *context) { struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2753,10 +2666,10 @@ static void dcn10_optimize_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } -static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, +void dcn10_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, unsigned int vmin, unsigned int vmax, unsigned int vmid, unsigned int vmid_frame_number) { @@ -2784,7 +2697,7 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, } } -static void dcn10_get_position(struct pipe_ctx **pipe_ctx, +void dcn10_get_position(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position) { @@ -2796,7 +2709,7 @@ static void dcn10_get_position(struct pipe_ctx **pipe_ctx, pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position); } -static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_events *events) { unsigned int i; @@ -2851,7 +2764,7 @@ static void dcn10_config_stereo_parameters( return; } -static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) { struct crtc_stereo_flags flags = { 0 }; struct dc_stream_state *stream = pipe_ctx->stream; @@ -2890,7 +2803,7 @@ static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_in return NULL; } -static void dcn10_wait_for_mpcc_disconnect( +void dcn10_wait_for_mpcc_disconnect( struct dc *dc, struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx) @@ -2898,7 +2811,7 @@ static void dcn10_wait_for_mpcc_disconnect( int mpcc_inst; if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } if (!pipe_ctx->stream_res.opp) @@ -2915,12 +2828,12 @@ static void dcn10_wait_for_mpcc_disconnect( } if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + dc->hwss.verify_allow_pstate_change_high(dc); } } -static bool dcn10_dummy_display_power_gating( +bool dcn10_dummy_display_power_gating( struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, @@ -2929,7 +2842,7 @@ static bool dcn10_dummy_display_power_gating( return true; } -static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) { struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct timing_generator *tg = pipe_ctx->stream_res.tg; @@ -2953,7 +2866,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) } } -static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) { struct 
hubbub *hubbub = hws->ctx->dc->res_pool->hubbub; @@ -2961,7 +2874,7 @@ static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh hubbub->funcs->update_dchub(hubbub, dh_data); } -static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -3027,7 +2940,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height); } -static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) { struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; @@ -3037,7 +2950,7 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, attributes); } -static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) { uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level; struct fixed31_32 multiplier; @@ -3064,12 +2977,12 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &opt_attr); } -/** -* apply_front_porch_workaround TODO FPGA still need? -* -* This is a workaround for a bug that has existed since R5xx and has not been -* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. -*/ +/* + * apply_front_porch_workaround TODO FPGA still need? + * + * This is a workaround for a bug that has existed since R5xx and has not been + * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. + */ static void apply_front_porch_workaround( struct dc_crtc_timing *timing) { @@ -3082,7 +2995,7 @@ static void apply_front_porch_workaround( } } -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) { const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; struct dc_crtc_timing patched_crtc_timing; @@ -3111,34 +3024,8 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) return vertical_line_start; } -void lock_all_pipes(struct dc *dc, - struct dc_state *context, - bool lock) -{ - struct pipe_ctx *pipe_ctx; - struct timing_generator *tg; - int i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe_ctx = &context->res_ctx.pipe_ctx[i]; - tg = pipe_ctx->stream_res.tg; - /* - * Only lock the top pipe's tg to prevent redundant - * (un)locking. Also skip if pipe is disabled. 
- */ - if (pipe_ctx->top_pipe || - !pipe_ctx->stream || !pipe_ctx->plane_state || - !tg->funcs->is_tg_enabled(tg)) - continue; - - if (lock) - tg->funcs->lock(tg); - else - tg->funcs->unlock(tg); - } -} - -static void calc_vupdate_position( +static void dcn10_calc_vupdate_position( + struct dc *dc, struct pipe_ctx *pipe_ctx, uint32_t *start_line, uint32_t *end_line) @@ -3146,7 +3033,7 @@ static void calc_vupdate_position( const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; int vline_int_offset_from_vupdate = pipe_ctx->stream->periodic_interrupt0.lines_offset; - int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx); + int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); int start_position; if (vline_int_offset_from_vupdate > 0) @@ -3167,7 +3054,8 @@ static void calc_vupdate_position( *end_line = 2; } -static void cal_vline_position( +static void dcn10_cal_vline_position( + struct dc *dc, struct pipe_ctx *pipe_ctx, enum vline_select vline, uint32_t *start_line, @@ -3182,7 +3070,8 @@ static void cal_vline_position( switch (ref_point) { case START_V_UPDATE: - calc_vupdate_position( + dcn10_calc_vupdate_position( + dc, pipe_ctx, start_line, end_line); @@ -3196,7 +3085,8 @@ static void cal_vline_position( } } -static void dcn10_setup_periodic_interrupt( +void dcn10_setup_periodic_interrupt( + struct dc *dc, struct pipe_ctx *pipe_ctx, enum vline_select vline) { @@ -3206,7 +3096,7 @@ static void dcn10_setup_periodic_interrupt( uint32_t start_line = 0; uint32_t end_line = 0; - cal_vline_position(pipe_ctx, vline, &start_line, &end_line); + dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line); tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); @@ -3217,10 +3107,10 @@ static void dcn10_setup_periodic_interrupt( } } -static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct timing_generator *tg = pipe_ctx->stream_res.tg; - int start_line = get_vupdate_offset_from_vsync(pipe_ctx); + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (start_line < 0) { ASSERT(0); @@ -3231,7 +3121,7 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { struct encoder_unblank_param params = { { 0 } }; @@ -3254,7 +3144,7 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, } } -static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, const uint8_t *custom_sdp_message, unsigned int sdp_message_size) { @@ -3265,7 +3155,7 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, sdp_message_size); } } -static enum dc_status dcn10_set_clock(struct dc *dc, +enum dc_status dcn10_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) @@ -3305,7 +3195,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc, } -static void dcn10_get_clock(struct dc *dc, +void dcn10_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) { @@ -3315,77 +3205,3 @@ static void dcn10_get_clock(struct dc *dc, dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg); } - -static const struct hw_sequencer_funcs dcn10_funcs 
= { - .program_gamut_remap = dcn10_program_gamut_remap, - .init_hw = dcn10_init_hw, - .init_pipes = dcn10_init_pipes, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, - .update_plane_addr = dcn10_update_plane_addr, - .plane_atomic_disconnect = hwss1_plane_atomic_disconnect, - .update_dchub = dcn10_update_dchub, - .update_mpcc = dcn10_update_mpcc, - .update_pending_status = dcn10_update_pending_status, - .set_input_transfer_func = dcn10_set_input_transfer_func, - .set_output_transfer_func = dcn10_set_output_transfer_func, - .program_output_csc = dcn10_program_output_csc, - .power_down = dce110_power_down, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dce110_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dce110_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn10_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .disable_plane = dcn10_disable_plane, - .blank_pixel_data = dcn10_blank_pixel_data, - .pipe_control_lock = dcn10_pipe_control_lock, - .prepare_bandwidth = dcn10_prepare_bandwidth, - .optimize_bandwidth = dcn10_optimize_bandwidth, - .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, - .enable_stream_timing = dcn10_enable_stream_timing, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dce110_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = NULL, - .disable_vga = dcn10_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn10_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn10_enable_power_gating_plane, - .dpp_pg_control = dcn10_dpp_pg_control, - .hubp_pg_control = dcn10_hubp_pg_control, - .dsc_pg_control = NULL, -}; - - -void dcn10_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn10_funcs; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index d3616b1948cc..5aad3922be6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -31,64 +31,155 @@ struct dc; void 
dcn10_hw_sequencer_construct(struct dc *dc); -extern void fill_display_configs( - const struct dc_state *context, - struct dm_pp_display_configuration *pp_display_cfg); - -bool is_rgb_cspace(enum dc_color_space output_color_space); - -void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void dcn10_verify_allow_pstate_change_high(struct dc *dc); +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +enum dc_status dcn10_enable_stream_timing( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); +void dcn10_optimize_bandwidth( + struct dc *dc, + struct dc_state *context); +void dcn10_prepare_bandwidth( + struct dc *dc, + struct dc_state *context); +void dcn10_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn10_blank_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings); +void dcn10_program_output_csc(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id); +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_apply_ctx_for_surface( + struct dc *dc, + const struct dc_stream_state *stream, + int num_planes, + struct dc_state *context); +void dcn10_hubp_pg_control( + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); +void dcn10_dpp_pg_control( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); +void dcn10_enable_power_gating_plane( + struct dce_hwseq *hws, + bool enable); +void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_disable_vga( + struct dce_hwseq *hws); void dcn10_program_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); - -void dcn10_get_hw_state( +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx); +void dcn10_init_hw(struct dc *dc); +void dcn10_init_pipes(struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_ctx_to_hw( + struct dc *dc, + struct dc_state *context); +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data); +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx); +void dce110_power_down(struct dc *dc); +void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context); +void dcn10_enable_timing_synchronization( + struct dc *dc, + int group_index, + int group_size, + struct pipe_ctx *grouped_pipes[]); +void dcn10_enable_per_frame_crtc_position_reset( + struct dc *dc, + int group_size, + struct pipe_ctx *grouped_pipes[]); +void dce110_update_info_frame(struct pipe_ctx *pipe_ctx); +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, + const uint8_t *custom_sdp_message, + unsigned int sdp_message_size); +void dce110_blank_stream(struct pipe_ctx *pipe_ctx); +void dce110_enable_audio_stream(struct 
pipe_ctx *pipe_ctx); +void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx); +bool dcn10_dummy_display_power_gating( struct dc *dc, - char *pBuf, unsigned int bufSize, + uint8_t controller_id, + struct dc_bios *dcb, + enum pipe_gating_control power_gating); +void dcn10_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, unsigned int vmin, unsigned int vmax, + unsigned int vmid, unsigned int vmid_frame_number); +void dcn10_get_position(struct pipe_ctx **pipe_ctx, + int num_pipes, + struct crtc_position *position); +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_events *events); +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc); +void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); +void dcn10_get_hw_state(struct dc *dc, + char *pBuf, + unsigned int bufSize, unsigned int mask); - void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); - -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); - -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); - +void dcn10_wait_for_mpcc_disconnect( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx); +void dce110_edp_backlight_control( + struct dc_link *link, + bool enable); +void dce110_edp_power_control( + struct dc_link *link, + bool power_up); +void dce110_edp_wait_for_hpd_ready( + struct dc_link *link, + bool power_up); +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx); +void dcn10_setup_periodic_interrupt( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum vline_select vline); +enum dc_status dcn10_set_clock(struct dc *dc, + enum dc_clock_type clock_type, + uint32_t clk_khz, + uint32_t stepping); +void dcn10_get_clock(struct dc *dc, + enum dc_clock_type clock_type, + struct dc_clock_config *clock_cfg); +bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_bios_golden_init(struct dc *dc); +void dcn10_plane_atomic_power_down(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp); void dcn10_get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color); - void dcn10_get_hdr_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); - -bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void update_dchubp_dpp( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context); - -struct pipe_ctx *find_top_pipe_for_stream( - struct dc *dc, - struct dc_state *context, - const struct dc_stream_state *stream); - -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); - -void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale, - const struct dc_plane_state *plane_state); -void lock_all_pipes(struct dc *dc, - struct dc_state *context, - bool lock); +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx); +void dcn10_verify_allow_pstate_change_high(struct dc *dc); #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h new file mode 100644 index 
000000000000..596f95c22e85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN10_DEBUG_H__ +#define __DC_HWSS_DCN10_DEBUG_H__ + +#include "core_types.h" + +struct dc; + +void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); + +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); + +void dcn10_get_hw_state(struct dc *dc, + char *pBuf, + unsigned int bufSize, + unsigned int mask); + +#endif /* __DC_HWSS_DCN10_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c new file mode 100644 index 000000000000..38923f3120ee --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -0,0 +1,105 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer.h" + +static const struct hw_sequencer_funcs dcn10_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .init_pipes = dcn10_init_pipes, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, + .update_plane_addr = dcn10_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .program_pipe = dcn10_program_pipe, + .update_dchub = dcn10_update_dchub, + .update_mpcc = dcn10_update_mpcc, + .update_pending_status = dcn10_update_pending_status, + .set_input_transfer_func = dcn10_set_input_transfer_func, + .set_output_transfer_func = dcn10_set_output_transfer_func, + .program_output_csc = dcn10_program_output_csc, + .power_down = dce110_power_down, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dce110_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn10_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .disable_plane = dcn10_disable_plane, + .blank_pixel_data = dcn10_blank_pixel_data, + .pipe_control_lock = dcn10_pipe_control_lock, + .prepare_bandwidth = dcn10_prepare_bandwidth, + .optimize_bandwidth = dcn10_optimize_bandwidth, + .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, + .enable_stream_timing = dcn10_enable_stream_timing, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = NULL, + .disable_vga = dcn10_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn10_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn10_enable_power_gating_plane, + .dpp_pg_control = dcn10_dpp_pg_control, + .hubp_pg_control = dcn10_hubp_pg_control, + .dsc_pg_control = NULL, + .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = 
dcn10_verify_allow_pstate_change_high, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +void dcn10_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn10_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h new file mode 100644 index 000000000000..8c6fd7b844a4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN10_INIT_H__ +#define __DC_DCN10_INIT_H__ + +struct dc; + +void dcn10_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN10_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 602769e2f4a7..3b71898e859e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -28,6 +28,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn10_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn10_resource.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 1eebaac81b36..1f94b8bc68b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -1,7 +1,7 @@ # # Makefile for DCN. 
-DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ +DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 868099fbe8ba..03e4aafb237b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -25,15 +25,15 @@ #include #include "dm_services.h" +#include "basics/dc_common.h" #include "dm_helpers.h" #include "core_types.h" #include "resource.h" -#include "dcn20/dcn20_resource.h" -#include "dce110/dce110_hw_sequencer.h" -#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_resource.h" #include "dcn20_hwseq.h" #include "dce/dce_hwseq.h" -#include "dcn20/dcn20_dsc.h" +#include "dcn20_dsc.h" +#include "dcn20_optc.h" #include "abm.h" #include "clk_mgr.h" #include "dmcu.h" @@ -43,10 +43,9 @@ #include "ipp.h" #include "mpc.h" #include "mcif_wb.h" +#include "dchubbub.h" #include "reg_helper.h" #include "dcn10/dcn10_cm_common.h" -#include "dcn10/dcn10_hubbub.h" -#include "dcn10/dcn10_optc.h" #include "dc_link_dp.h" #include "vm_helper.h" #include "dccg.h" @@ -62,7 +61,125 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static void dcn20_enable_power_gating_plane( +static int find_free_gsl_group(const struct dc *dc) +{ + if (dc->res_pool->gsl_groups.gsl_0 == 0) + return 1; + if (dc->res_pool->gsl_groups.gsl_1 == 0) + return 2; + if (dc->res_pool->gsl_groups.gsl_2 == 0) + return 3; + + return 0; +} + +/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) + * This is only used to lock pipes in pipe splitting case with immediate flip + * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, + * so we get tearing with freesync since we cannot flip multiple pipes + * atomically. + * We use GSL for this: + * - immediate flip: find first available GSL group if not already assigned + * program gsl with that group, set current OTG as master + * and always us 0x4 = AND of flip_ready from all pipes + * - vsync flip: disable GSL if used + * + * Groups in stream_res are stored as +1 from HW registers, i.e. 
+ * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 + * Using a magic value like -1 would require tracking all inits/resets + */ +static void dcn20_setup_gsl_group_as_lock( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable) +{ + struct gsl_params gsl; + int group_idx; + + memset(&gsl, 0, sizeof(struct gsl_params)); + + if (enable) { + /* return if group already assigned since GSL was set up + * for vsync flip, we would unassign so it can't be "left over" + */ + if (pipe_ctx->stream_res.gsl_group > 0) + return; + + group_idx = find_free_gsl_group(dc); + ASSERT(group_idx != 0); + pipe_ctx->stream_res.gsl_group = group_idx; + + /* set gsl group reg field and mark resource used */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 1; + dc->res_pool->gsl_groups.gsl_0 = 1; + break; + case 2: + gsl.gsl1_en = 1; + dc->res_pool->gsl_groups.gsl_1 = 1; + break; + case 3: + gsl.gsl2_en = 1; + dc->res_pool->gsl_groups.gsl_2 = 1; + break; + default: + BREAK_TO_DEBUGGER(); + return; // invalid case + } + gsl.gsl_master_en = 1; + } else { + group_idx = pipe_ctx->stream_res.gsl_group; + if (group_idx == 0) + return; // if not in use, just return + + pipe_ctx->stream_res.gsl_group = 0; + + /* unset gsl group reg field and mark resource free */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 0; + dc->res_pool->gsl_groups.gsl_0 = 0; + break; + case 2: + gsl.gsl1_en = 0; + dc->res_pool->gsl_groups.gsl_1 = 0; + break; + case 3: + gsl.gsl2_en = 0; + dc->res_pool->gsl_groups.gsl_2 = 0; + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + gsl.gsl_master_en = 0; + } + + /* at this point we want to program whether it's to enable or disable */ + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + pipe_ctx->stream_res.tg->funcs->set_gsl( + pipe_ctx->stream_res.tg, + &gsl); + + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); + } else + BREAK_TO_DEBUGGER(); +} + +void dcn20_set_flip_control_gsl( + struct pipe_ctx *pipe_ctx, + bool flip_immediate) +{ + if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) + pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( + pipe_ctx->plane_res.hubp, flip_immediate); + +} + +void dcn20_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { @@ -126,44 +243,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws) /* This value is dependent on the hardware pipeline delay so set once per SOC */ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c); } -void dcn20_display_init(struct dc *dc) -{ - struct dce_hwseq *hws = dc->hwseq; - - /* RBBMIF - * disable RBBMIF timeout detection for all clients - * Ensure RBBMIF does not drop register accesses due to the per-client timeout - */ - REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); - REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - - /* DCCG */ - dcn20_dccg_init(hws); - - REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0); - - /* DCHUB/MMHUBBUB - * set global timer refclk divider - * 100Mhz refclk -> 2 - * 27Mhz refclk -> 1 - * 48Mhz refclk -> 1 - */ - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); - - /* OPTC - * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc - */ - - /* AZ - * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser, - * if not, it should be programmed according to the ref clock - */ - REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64); - /* Enable controller clock gating */ - REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1); -} void dcn20_disable_vga( struct dce_hwseq *hws) @@ -176,15 +255,15 @@ void dcn20_disable_vga( REG_WRITE(D6VGA_CONTROL, 0); } -void dcn20_program_tripleBuffer( +void dcn20_program_triple_buffer( const struct dc *dc, struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer) + bool enable_triple_buffer) { if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) { pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer( pipe_ctx->plane_res.hubp, - enableTripleBuffer); + enable_triple_buffer); } } @@ -240,10 +319,10 @@ void dcn20_init_blank( otg_active_height); } - dcn20_hwss_wait_for_blank_complete(opp); + dc->hwss.wait_for_blank_complete(opp); } -static void dcn20_dsc_pg_control( +void dcn20_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) @@ -320,7 +399,7 @@ static void dcn20_dsc_pg_control( REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } -static void dcn20_dpp_pg_control( +void dcn20_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) @@ -394,7 +473,7 @@ static void dcn20_dpp_pg_control( } -static void dcn20_hubp_pg_control( +void dcn20_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -471,7 +550,7 @@ static void dcn20_hubp_pg_control( /* disable HW used by plane. 
* note: cannot disable until disconnect is complete */ -static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -591,7 +670,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp); + dc->hwss.wait_for_blank_complete(pipe_ctx->stream_res.opp); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -647,7 +726,7 @@ void dcn20_program_output_csc(struct dc *dc, } } -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) { int mpcc_id = pipe_ctx->plane_res.hubp->inst; @@ -737,8 +816,9 @@ bool dcn20_set_shaper_3dlut( return result; } -bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state) +bool dcn20_set_input_transfer_func(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; const struct dc_transfer_func *tf = NULL; @@ -748,8 +828,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, if (dpp_base == NULL || plane_state == NULL) return false; - dcn20_set_shaper_3dlut(pipe_ctx, plane_state); - dcn20_set_blend_lut(pipe_ctx, plane_state); + dc->hwss.set_shaper_3dlut(pipe_ctx, plane_state); + dc->hwss.set_blend_lut(pipe_ctx, plane_state); if (plane_state->in_transfer_func) tf = plane_state->in_transfer_func; @@ -814,7 +894,7 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, return result; } -static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -1237,7 +1317,7 @@ static void dcn20_update_dchubp_dpp( if (dpp->funcs->dpp_program_bias_and_scale) { //TODO :for CNVC set scale and bias registers if necessary - dcn10_build_prescale_params(&bns_params, plane_state); + build_prescale_params(&bns_params, plane_state); dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } } @@ -1361,7 +1441,7 @@ static void dcn20_program_pipe( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); } if (pipe_ctx->update_flags.bits.odm) @@ -1375,19 +1455,19 @@ static void dcn20_program_pipe( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->plane_state->update_flags.bits.hdr_mult) - set_hdr_multiplier(pipe_ctx); + dc->hwss.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->update_flags.bits.enable || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + dc->hwss.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish * only do gamma programming for powering on, internal memcmp to avoid * updating on slave planes */ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + 
dc->hwss.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); /* If the pipe has been enabled or has a different opp, we * should reprogram the fmt. This deals with cases where @@ -1421,7 +1501,7 @@ static bool does_pipe_need_lock(struct pipe_ctx *pipe) return false; } -static void dcn20_program_front_end_for_ctx( +void dcn20_program_front_end_for_ctx( struct dc *dc, struct dc_state *context) { @@ -1602,7 +1682,7 @@ bool dcn20_update_bandwidth( dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); } pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1616,7 +1696,7 @@ bool dcn20_update_bandwidth( return true; } -static void dcn20_enable_writeback( +void dcn20_enable_writeback( struct dc *dc, const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, @@ -1660,7 +1740,7 @@ void dcn20_disable_writeback( mcif_wb->funcs->disable_mcif(mcif_wb); } -bool dcn20_hwss_wait_for_blank_complete( +bool dcn20_wait_for_blank_complete( struct output_pixel_processor *opp) { int counter; @@ -1689,7 +1769,7 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) return hubp->funcs->dmdata_status_done(hubp); } -static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; @@ -1704,7 +1784,7 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx } } -static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; @@ -1739,12 +1819,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) hubp->funcs->dmdata_set_attributes(hubp, &attr); } -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx) -{ - dce110_disable_stream(pipe_ctx); -} - -static void dcn20_init_vm_ctx( +void dcn20_init_vm_ctx( struct dce_hwseq *hws, struct dc *dc, struct dc_virtual_addr_space_config *va_config, @@ -1766,7 +1841,7 @@ static void dcn20_init_vm_ctx( dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid); } -static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { struct dcn_hubbub_phys_addr_config config; @@ -1810,8 +1885,7 @@ static bool patch_address_for_sbs_tb_stereo( return false; } - -static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { bool addr_patched = false; PHYSICAL_ADDRESS_LOC addr; @@ -1857,7 +1931,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; if (dc_is_dp_signal(pipe_ctx->stream->signal)) { - if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) + if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) params.timing.pix_clk_100hz /= 2; pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); @@ -1869,10 +1943,10 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, } } -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) { 
struct timing_generator *tg = pipe_ctx->stream_res.tg; - int start_line = get_vupdate_offset_from_vsync(pipe_ctx); + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (start_line < 0) start_line = 0; @@ -1948,7 +2022,7 @@ static void dcn20_reset_back_end_for_pipe( pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } -static void dcn20_reset_hw_ctx_wrap( +void dcn20_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { @@ -2001,7 +2075,7 @@ void dcn20_get_mpctree_visual_confirm_color( *color = pipe_colors[top_pipe->pipe_idx]; } -static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = { {0} }; @@ -2013,10 +2087,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) // input to MPCC is always RGB, by default leave black_color at 0 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dcn10_get_hdr_visual_confirm_color( + dc->hwss.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dcn10_get_surface_visual_confirm_color( + dc->hwss.get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { dcn20_get_mpctree_visual_confirm_color( @@ -2083,125 +2157,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->mpcc_id = mpcc_id; } -static int find_free_gsl_group(const struct dc *dc) -{ - if (dc->res_pool->gsl_groups.gsl_0 == 0) - return 1; - if (dc->res_pool->gsl_groups.gsl_1 == 0) - return 2; - if (dc->res_pool->gsl_groups.gsl_2 == 0) - return 3; - - return 0; -} - -/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) - * This is only used to lock pipes in pipe splitting case with immediate flip - * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, - * so we get tearing with freesync since we cannot flip multiple pipes - * atomically. - * We use GSL for this: - * - immediate flip: find first available GSL group if not already assigned - * program gsl with that group, set current OTG as master - * and always us 0x4 = AND of flip_ready from all pipes - * - vsync flip: disable GSL if used - * - * Groups in stream_res are stored as +1 from HW registers, i.e. 
- * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 - * Using a magic value like -1 would require tracking all inits/resets - */ -void dcn20_setup_gsl_group_as_lock( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enable) -{ - struct gsl_params gsl; - int group_idx; - - memset(&gsl, 0, sizeof(struct gsl_params)); - - if (enable) { - /* return if group already assigned since GSL was set up - * for vsync flip, we would unassign so it can't be "left over" - */ - if (pipe_ctx->stream_res.gsl_group > 0) - return; - - group_idx = find_free_gsl_group(dc); - ASSERT(group_idx != 0); - pipe_ctx->stream_res.gsl_group = group_idx; - - /* set gsl group reg field and mark resource used */ - switch (group_idx) { - case 1: - gsl.gsl0_en = 1; - dc->res_pool->gsl_groups.gsl_0 = 1; - break; - case 2: - gsl.gsl1_en = 1; - dc->res_pool->gsl_groups.gsl_1 = 1; - break; - case 3: - gsl.gsl2_en = 1; - dc->res_pool->gsl_groups.gsl_2 = 1; - break; - default: - BREAK_TO_DEBUGGER(); - return; // invalid case - } - gsl.gsl_master_en = 1; - } else { - group_idx = pipe_ctx->stream_res.gsl_group; - if (group_idx == 0) - return; // if not in use, just return - - pipe_ctx->stream_res.gsl_group = 0; - - /* unset gsl group reg field and mark resource free */ - switch (group_idx) { - case 1: - gsl.gsl0_en = 0; - dc->res_pool->gsl_groups.gsl_0 = 0; - break; - case 2: - gsl.gsl1_en = 0; - dc->res_pool->gsl_groups.gsl_1 = 0; - break; - case 3: - gsl.gsl2_en = 0; - dc->res_pool->gsl_groups.gsl_2 = 0; - break; - default: - BREAK_TO_DEBUGGER(); - return; - } - gsl.gsl_master_en = 0; - } - - /* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { - pipe_ctx->stream_res.tg->funcs->set_gsl( - pipe_ctx->stream_res.tg, - &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); - } else - BREAK_TO_DEBUGGER(); -} - -static void dcn20_set_flip_control_gsl( - struct pipe_ctx *pipe_ctx, - bool flip_immediate) -{ - if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) - pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( - pipe_ctx->plane_res.hubp, flip_immediate); - -} - -static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = pipe_ctx->stream->link->cur_link_settings.lane_count; @@ -2249,7 +2205,7 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) } } -static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2275,7 +2231,7 @@ static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) hubp->inst, mode); } -static void dcn20_fpga_init_hw(struct dc *dc) +void dcn20_fpga_init_hw(struct dc *dc) { int i, j; struct dce_hwseq *hws = dc->hwseq; @@ -2296,7 +2252,7 @@ static void dcn20_fpga_init_hw(struct dc *dc) REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - dcn20_dccg_init(hws); + dc->hwss.dccg_init(hws); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); @@ -2360,7 +2316,7 @@ static void dcn20_fpga_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; /*to do*/ - hwss1_plane_atomic_disconnect(dc, pipe_ctx); + dc->hwss.plane_atomic_disconnect(dc, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -2389,53 +2345,3 @@ static void dcn20_fpga_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } } - -void dcn20_hw_sequencer_construct(struct dc *dc) -{ - dcn10_hw_sequencer_construct(dc); - dc->hwss.unblank_stream = dcn20_unblank_stream; - dc->hwss.update_plane_addr = dcn20_update_plane_addr; - dc->hwss.enable_stream_timing = dcn20_enable_stream_timing; - dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; - dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; - dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; - dc->hwss.apply_ctx_for_surface = NULL; - dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx; - dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; - dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; - dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; - dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth; - dc->hwss.update_bandwidth = dcn20_update_bandwidth; - dc->hwss.enable_writeback = dcn20_enable_writeback; - dc->hwss.disable_writeback = dcn20_disable_writeback; - dc->hwss.program_output_csc = dcn20_program_output_csc; - dc->hwss.update_odm = dcn20_update_odm; - dc->hwss.blank_pixel_data = dcn20_blank_pixel_data; - dc->hwss.dmdata_status_done = dcn20_dmdata_status_done; - dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine; - dc->hwss.enable_stream = dcn20_enable_stream; - dc->hwss.disable_stream = dcn20_disable_stream; - dc->hwss.init_sys_ctx = dcn20_init_sys_ctx; - dc->hwss.init_vm_ctx = dcn20_init_vm_ctx; - dc->hwss.disable_stream_gating = dcn20_disable_stream_gating; - dc->hwss.enable_stream_gating = dcn20_enable_stream_gating; - dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt; - 
dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap; - dc->hwss.update_mpcc = dcn20_update_mpcc; - dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl; - dc->hwss.init_blank = dcn20_init_blank; - dc->hwss.disable_plane = dcn20_disable_plane; - dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable; - dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane; - dc->hwss.dpp_pg_control = dcn20_dpp_pg_control; - dc->hwss.hubp_pg_control = dcn20_hubp_pg_control; - dc->hwss.dsc_pg_control = dcn20_dsc_pg_control; - dc->hwss.disable_vga = dcn20_disable_vga; - - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwss.init_pipes = NULL; - } - - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 3098f1049ed7..28aaceed6d8b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -26,90 +26,110 @@ #ifndef __DC_HWSS_DCN20_H__ #define __DC_HWSS_DCN20_H__ -struct dc; - -void dcn20_hw_sequencer_construct(struct dc *dc); - -enum dc_status dcn20_enable_stream_timing( - struct pipe_ctx *pipe_ctx, - struct dc_state *context, - struct dc *dc); - -void dcn20_blank_pixel_data( +bool dcn20_set_blend_lut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +bool dcn20_set_shaper_3dlut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +void dcn20_program_front_end_for_ctx( struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank); - + struct dc_state *context); +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); void dcn20_program_output_csc(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, uint16_t *matrix, int opp_id); - +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); +void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings); +void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_blank_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); +void dcn20_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn20_pipe_control_lock_global( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); void dcn20_prepare_bandwidth( struct dc *dc, struct dc_state *context); - void dcn20_optimize_bandwidth( struct dc *dc, struct dc_state *context); - bool dcn20_update_bandwidth( struct dc *dc, struct dc_state *context); - +void dcn20_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +enum dc_status dcn20_enable_stream_timing( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_init_blank( + struct dc *dc, + struct timing_generator *tg); +void dcn20_disable_vga( + struct dce_hwseq *hws); +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_power_gating_plane( + struct dce_hwseq *hws, + bool 
enable); +void dcn20_dpp_pg_control( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); +void dcn20_hubp_pg_control( + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); +void dcn20_program_triple_buffer( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable_triple_buffer); +void dcn20_enable_writeback( + struct dc *dc, + const struct dc_stream_status *stream_status, + struct dc_writeback_info *wb_info, + struct dc_state *context); void dcn20_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst); - -bool dcn20_hwss_wait_for_blank_complete( - struct output_pixel_processor *opp); - -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream); - -bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state); - +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx); - -void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx); - -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx); - -void dcn20_program_tripleBuffer( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer); - -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx); - -void dcn20_pipe_control_lock_global( +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx); +void dcn20_init_vm_ctx( + struct dce_hwseq *hws, struct dc *dc, - struct pipe_ctx *pipe, - bool lock); -void dcn20_setup_gsl_group_as_lock(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enable); -void dcn20_dccg_init(struct dce_hwseq *hws); -void dcn20_init_blank( - struct dc *dc, - struct timing_generator *tg); -void dcn20_display_init(struct dc *dc); -void dcn20_pipe_control_lock( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); -void dcn20_enable_plane( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context); -bool dcn20_set_blend_lut( - struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); -bool dcn20_set_shaper_3dlut( - struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); -void dcn20_get_mpctree_visual_confirm_color( + struct dc_virtual_addr_space_config *va_config, + int vmid); +void dcn20_set_flip_control_gsl( struct pipe_ctx *pipe_ctx, - struct tg_color *color); + bool flip_immediate); +void dcn20_dsc_pg_control( + struct dce_hwseq *hws, + unsigned int dsc_inst, + bool power_on); +void dcn20_fpga_init_hw(struct dc *dc); +bool dcn20_wait_for_blank_complete( + struct output_pixel_processor *opp); +void dcn20_dccg_init(struct dce_hwseq *hws); +int dcn20_init_sys_ctx(struct dce_hwseq *hws, + struct dc *dc, + struct dc_phy_addr_space_config *pa_config); + #endif /* __DC_HWSS_DCN20_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c new file mode 100644 index 000000000000..51b6c25aa3c5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -0,0 +1,127 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_hwseq.h" + +static const struct hw_sequencer_funcs dcn20_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .init_pipes = dcn10_init_pipes, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_dchub = dcn10_update_dchub, + .update_mpcc = dcn20_update_mpcc, + .update_pending_status = dcn10_update_pending_status, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .program_output_csc = dcn20_program_output_csc, + .power_down = dce110_power_down, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .disable_plane = dcn20_disable_plane, + .blank_pixel_data = dcn20_blank_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .pipe_control_lock_global = dcn20_pipe_control_lock_global, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, 
+ .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .dsc_pg_control = NULL, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + .update_odm = dcn20_update_odm, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .dsc_pg_control = dcn20_dsc_pg_control, + .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +void dcn20_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn20_funcs; + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->hwss.init_hw = dcn20_fpga_init_hw; + dc->hwss.init_pipes = NULL; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h new file mode 100644 index 000000000000..12277797cd71 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN20_INIT_H__ +#define __DC_DCN20_INIT_H__ + +struct dc; + +void dcn20_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 0e50dc9b611a..f5854a5d2b76 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -201,11 +201,11 @@ void optc2_set_dsc_config(struct timing_generator *optc, OPTC_DSC_SLICE_WIDTH, dsc_slice_width); } -/** - * PTI i think is already done somewhere else for 2ka - * (opp?, please double check. - * OPTC side only has 1 register to set for PTI_ENABLE) - */ +/*TEMP: Need to figure out inheritance model here.*/ +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + return optc1_is_two_pixels_per_containter(timing); +} void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) @@ -219,7 +219,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc, OPTC_SEG1_SRC_SEL, 0xf); REG_WRITE(OTG_H_TIMING_CNTL, 0); - h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing); + h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing); REG_UPDATE(OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, h_div_2); REG_SET(OPTC_MEMORY_CONFIG, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index 9ae22146d2d8..ac93fbfaee03 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -107,5 +107,5 @@ void optc2_triplebuffer_unlock(struct timing_generator *optc); void optc2_lock_doublebuffer_disable(struct timing_generator *optc); void optc2_lock_doublebuffer_enable(struct timing_generator *optc); void optc2_program_manual_trigger(struct timing_generator *optc); - +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); #endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 38056e111c61..328d10f6fbfe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -28,6 +28,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn20_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" @@ -1385,7 +1387,7 @@ static void get_pixel_clock_parameters( if (opp_cnt == 4) pixel_clk_params->requested_pix_clk_100hz /= 4; - else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) + else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index 19fabac13c65..cb839b0adb9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,7 +1,8 @@ # # Makefile for DCN21. 
-DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o +DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ + dcn21_hwseq.o dcn21_link_encoder.o ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) cc_stack_align := -mpreferred-stack-boundary=4 diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index b25215cadf85..005894dcabc9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -28,7 +28,6 @@ #include "core_types.h" #include "resource.h" #include "dce/dce_hwseq.h" -#include "dcn20/dcn20_hwseq.h" #include "vmid.h" #include "reg_helper.h" #include "hw/clk_mgr.h" @@ -61,7 +60,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c } -static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { struct dcn_hubbub_phys_addr_config config; @@ -82,7 +81,7 @@ static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_ph // work around for Renoir s0i3, if register is programmed, bypass golden init. -static bool dcn21_s0i3_golden_init_wa(struct dc *dc) +bool dcn21_s0i3_golden_init_wa(struct dc *dc) { struct dce_hwseq *hws = dc->hwseq; uint32_t value = 0; @@ -112,11 +111,3 @@ void dcn21_optimize_pwr_state( true); } -void dcn21_hw_sequencer_construct(struct dc *dc) -{ - dcn20_hw_sequencer_construct(dc); - dc->hwss.init_sys_ctx = dcn21_init_sys_ctx; - dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa; - dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state; - dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h index be67b62e6fb1..2f7b8a220eb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -28,6 +28,18 @@ struct dc; -void dcn21_hw_sequencer_construct(struct dc *dc); +int dcn21_init_sys_ctx(struct dce_hwseq *hws, + struct dc *dc, + struct dc_phy_addr_space_config *pa_config); + +bool dcn21_s0i3_golden_init_wa(struct dc *dc); + +void dcn21_exit_optimized_pwr_state( + const struct dc *dc, + struct dc_state *context); + +void dcn21_optimize_pwr_state( + const struct dc *dc, + struct dc_state *context); #endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c new file mode 100644 index 000000000000..1d8b67b4e252 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -0,0 +1,131 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21_hwseq.h" + +static const struct hw_sequencer_funcs dcn21_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .init_pipes = dcn10_init_pipes, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_dchub = dcn10_update_dchub, + .update_mpcc = dcn20_update_mpcc, + .update_pending_status = dcn10_update_pending_status, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .program_output_csc = dcn20_program_output_csc, + .power_down = dce110_power_down, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .disable_plane = dcn20_disable_plane, + .blank_pixel_data = dcn20_blank_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .pipe_control_lock_global = dcn20_pipe_control_lock_global, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = 
dcn10_get_clock, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .dsc_pg_control = NULL, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + .update_odm = dcn20_update_odm, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .init_sys_ctx = dcn21_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .dsc_pg_control = dcn20_dsc_pg_control, + .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +void dcn21_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn21_funcs; + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->hwss.init_hw = dcn20_fpga_init_hw; + dc->hwss.init_pipes = NULL; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h new file mode 100644 index 000000000000..3ed24292648a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_DCN21_INIT_H__ +#define __DC_DCN21_INIT_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN21_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 3e7215a464a6..dd3bc37d4eb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -26,6 +26,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn21_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 663fa1809a73..5941577d78a5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -148,11 +148,11 @@ struct hw_sequencer_funcs { void (*update_pending_status)( struct pipe_ctx *pipe_ctx); - bool (*set_input_transfer_func)( + bool (*set_input_transfer_func)(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); - bool (*set_output_transfer_func)( + bool (*set_output_transfer_func)(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream); @@ -279,8 +279,10 @@ struct hw_sequencer_funcs { void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); - void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline); - void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx); + void (*setup_periodic_interrupt)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum vline_select vline); + void (*setup_vupdate_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx); bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*init_blank)(struct dc *dc, struct timing_generator *tg); @@ -338,6 +340,36 @@ struct hw_sequencer_funcs { struct dc_clock_config *clock_cfg); bool (*s0i3_golden_init_wa)(struct dc *dc); + + void (*get_surface_visual_confirm_color)( + const struct pipe_ctx *pipe_ctx, + struct tg_color *color); + + void (*get_hdr_visual_confirm_color)( + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + + void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx); + + void (*verify_allow_pstate_change_high)(struct dc *dc); + + void (*program_pipe)( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); + + bool (*wait_for_blank_complete)( + struct output_pixel_processor *opp); + + void (*dccg_init)(struct dce_hwseq *hws); + + bool (*set_blend_lut)( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); + + bool (*set_shaper_3dlut)( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); + + int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); }; void color_space_to_black_color( -- cgit From b9fe5151052f9d1123027e2de1e6372d884887de Mon Sep 17 00:00:00 2001 From: Jaehyun Chung Date: Thu, 31 Oct 2019 15:53:24 -0400 Subject: drm/amd/display: DML Validation Dump/Check with Logging [Why] Need to validate that we are programming the expected values (rq, ttu, dlg) from DML. This debug feature will output logs if we are programming incorrect values and may help differentiate DAL issues from HW issues. [How] Dump relevant registers for each pipe with an active stream. Compare current reg values with the converted DML output. Log mismatches when found.
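The [How] above boils down to a read-back/compare/log loop per register field: read the value currently programmed in hardware, compare it against what DML computed, and emit a debug line only on mismatch. A minimal, self-contained C sketch of that pattern follows; it is illustrative only and not part of the patch — the hypothetical read_back_regs() and plain printf stand in for the driver's REG_GET macros and DC_LOG_DEBUG.

#include <stdio.h>

/* Simplified stand-in for a handful of DML request-register fields. */
struct rq_regs_example {
	unsigned int chunk_size;
	unsigned int min_chunk_size;
	unsigned int swath_height;
};

/* Hypothetical helper: in the driver this would be REG_GET() reads from the live HUBP registers. */
static void read_back_regs(struct rq_regs_example *hw)
{
	hw->chunk_size = 2;      /* pretend values read back from hardware */
	hw->min_chunk_size = 1;
	hw->swath_height = 4;
}

/* Log only when the programmed value differs from what DML expected. */
static void validate_field(const char *name, unsigned int expected, unsigned int actual)
{
	if (actual != expected)
		printf("DML Validation | %s - Expected: %u Actual: %u\n",
		       name, expected, actual);
}

int main(void)
{
	struct rq_regs_example dml = { .chunk_size = 2, .min_chunk_size = 1, .swath_height = 8 };
	struct rq_regs_example hw;

	read_back_regs(&hw);

	validate_field("CHUNK_SIZE", dml.chunk_size, hw.chunk_size);
	validate_field("MIN_CHUNK_SIZE", dml.min_chunk_size, hw.min_chunk_size);
	validate_field("SWATH_HEIGHT", dml.swath_height, hw.swath_height); /* mismatch -> logged */
	return 0;
}

The hubp2_validate_dml_output()/hubp21_validate_dml_output() hooks added below apply exactly this comparison, field by field, for the rq, dlg, and ttu register groups of each active pipe.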
Signed-off-by: Jaehyun Chung Reviewed-by: Alvin Lee Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 18 +- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 310 +++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 345 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 7 + 5 files changed, 680 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 1395aff39980..61dd373b4715 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2162,8 +2162,24 @@ static void commit_planes_for_stream(struct dc *dc, dc, pipe_ctx->stream, stream_status->plane_count, context); } } - if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { dc->hwss.program_front_end_for_ctx(dc, context); +#ifdef CONFIG_DRM_AMD_DC_DCN1_0 + if (dc->debug.validate_dml_output) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; + if (cur_pipe.stream == NULL) + continue; + + cur_pipe.plane_res.hubp->funcs->validate_dml_output( + cur_pipe.plane_res.hubp, dc->ctx, + &context->res_ctx.pipe_ctx[i].rq_regs, + &context->res_ctx.pipe_ctx[i].dlg_regs, + &context->res_ctx.pipe_ctx[i].ttu_regs); + } + } +#endif + } // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3cb361917b4b..f30c77e44bb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -412,6 +412,7 @@ struct dc_debug_options { bool nv12_iflip_vm_wa; bool disable_dram_clock_change_vactive_support; + bool validate_dml_output; }; struct dc_debug_data { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 7d9ffb81584a..2823be75b071 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -30,6 +30,8 @@ #include "reg_helper.h" #include "basics/conversion.h" +#define DC_LOGGER_INIT(logger) + #define REG(reg)\ hubp2->hubp_regs->reg @@ -1244,6 +1246,313 @@ void hubp2_read_state(struct hubp *hubp) } +void hubp2_validate_dml_output(struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; + struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; + struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; + DC_LOGGER_INIT(ctx->logger); + + /* Requestor Regs */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, + 
DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, + MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, + MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size, + SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear); + + if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) + DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); + if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); + if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); + if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); + if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + + if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); + if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); + if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); + if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); + if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); + if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); + if (rq_regs.rq_regs_l.swath_height != 
dml_rq_regs->rq_regs_l.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); + if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + + if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); + if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); + if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size); + if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size); + if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size); + if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size); + if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height); + if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear); + + /* DLG - Per hubp */ + REG_GET_2(BLANK_OFFSET_0, + REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end, + DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end); + REG_GET(BLANK_OFFSET_1, + MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start); + REG_GET(DST_DIMENSIONS, + REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal); + REG_GET_2(DST_AFTER_SCALER, + REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler); + REG_GET(REF_FREQ_TO_PIX_FREQ, + REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq); + + if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end); + if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u 
Actual: %u\n", + dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end); + if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n", + dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start); + if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal) + DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal); + if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler); + if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler); + if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq) + DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n", + dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l); + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l); + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); + REG_GET(VBLANK_PARAMETERS_2, + REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); + if (REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c); + + if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); + if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); + if 
(dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l); + if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); + if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); + if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); + if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c); + if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); + if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); + if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c); + if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); + if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); + if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); + if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); + if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: 
%u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); + if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + + if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); + if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); + REG_GET_3(DCN_SURF1_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); + REG_GET_3(DCN_CUR0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); + REG_GET(FLIP_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); + REG_GET(DCN_CUR0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); + REG_GET(DCN_CUR1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l); + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + + if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); + if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); + if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); + if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); + if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); + if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) + DC_LOG_DEBUG("DML Validation | 
DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); + if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); + if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); + if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); + if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); + if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); + if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) + DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); + if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); + if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); +} + static struct hubp_funcs dcn20_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -1267,6 +1576,7 @@ static struct hubp_funcs dcn20_hubp_funcs = { .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp1_init, + .validate_dml_output = hubp2_validate_dml_output, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 32e8b589aeb5..0be1c917b242 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -29,6 +29,8 @@ #include "dm_services.h" #include "reg_helper.h" +#define DC_LOGGER_INIT(logger) + #define REG(reg)\ hubp21->hubp_regs->reg @@ -254,6 +256,348 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, SYSTEM_ACCESS_MODE, 0x3); } +void hubp21_validate_dml_output(struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct 
_vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; + struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; + struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; + DC_LOGGER_INIT(ctx->logger); + + /* Requester - Per hubp */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, + VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); + REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, + SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear); + + if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) + DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); + if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); + if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); + if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); + if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + + if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); + if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); + if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); + if 
(rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); + if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); + if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:VM_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); + if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); + if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + + if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); + if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); + if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size); + if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size); + if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size); + if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height); + if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear); + + + /* DLG - Per hubp */ + REG_GET_2(BLANK_OFFSET_0, + REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end, + DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end); + REG_GET(BLANK_OFFSET_1, + MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start); + REG_GET(DST_DIMENSIONS, + REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal); + 
REG_GET_2(DST_AFTER_SCALER, + REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler); + REG_GET(REF_FREQ_TO_PIX_FREQ, + REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq); + + if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end); + if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end); + if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n", + dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start); + if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal) + DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal); + if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler); + if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler); + if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq) + DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n", + dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l); + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l); + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); + REG_GET(VBLANK_PARAMETERS_2, + REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); + if (REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, 
&dlg_attr.refcyc_per_meta_chunk_vblank_c); + + if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); + if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); + if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l); + if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); + if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); + if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); + if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c); + if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); + if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); + if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c); + if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); + if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); + if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u 
Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); + if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); + if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); + if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + + if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); + if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); + REG_GET_3(DCN_SURF1_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); + REG_GET_3(DCN_CUR0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); + REG_GET(FLIP_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); + REG_GET(DCN_CUR0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); + REG_GET(DCN_CUR1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l); + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + + if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); + if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); + if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); + if 
(ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); + if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); + if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); + if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); + if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); + if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); + if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); + if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); + if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) + DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); + if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); + if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); + + /* Host VM deadline regs */ + REG_GET(VBLANK_PARAMETERS_5, + REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank); + REG_GET(VBLANK_PARAMETERS_6, + REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank); + REG_GET(FLIP_PARAMETERS_3, + REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip); + REG_GET(FLIP_PARAMETERS_4, + REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip); + REG_GET(FLIP_PARAMETERS_5, + REFCYC_PER_PTE_GROUP_FLIP_C, &dlg_attr.refcyc_per_pte_group_flip_c); + REG_GET(FLIP_PARAMETERS_6, + REFCYC_PER_META_CHUNK_FLIP_C, 
&dlg_attr.refcyc_per_meta_chunk_flip_c); + REG_GET(FLIP_PARAMETERS_2, + REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l); + + if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank); + if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank); + if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip); + if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip); + if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c); + if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c); + if (dlg_attr.refcyc_per_meta_chunk_flip_l != dml_dlg_attr->refcyc_per_meta_chunk_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l); +} + void hubp21_init(struct hubp *hubp) { // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta @@ -286,6 +630,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_clear_underflow = hubp1_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp21_init, + .validate_dml_output = hubp21_validate_dml_output, }; bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 62b2d24cd1d3..9793da0f3c7e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -157,6 +157,13 @@ struct hubp_funcs { struct hubp *hubp, bool enable); + void (*validate_dml_output)( + struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr); + }; #endif -- cgit From a6e4da40bda5682072f1b26b19ce3d25a1b43bf7 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 5 Nov 2019 12:51:51 -0500 Subject: drm/amd/display: Spin for DMCUB PHY init in DC [Why] DCN will hang if we access registers before PHY init is done. So we need to spin or abort. [How] On hardware with DMCUB running and working we shouldn't time out waiting for this to finish and we shouldn't hit the spin cycle. 
If there's no hardware support then we should exit out of the function early assuming that PHY init was already done elsewhere. If we hit the timeout then there's likely a bug in firmware or software and we need to debug - add errors and asserts as appropriate. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 8d348a5f5599..59c298a6484f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -112,10 +112,23 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status; - status = dmub_srv_wait_for_phy_init(dmub, 10000000); - if (status != DMUB_STATUS_OK) { - DC_ERROR("Error waiting for DMUB phy init: status=%d\n", - status); + for (;;) { + /* Wait up to a second for PHY init. */ + status = dmub_srv_wait_for_phy_init(dmub, 1000000); + if (status == DMUB_STATUS_OK) + /* Initialization OK */ + break; + + DC_ERROR("DMCUB PHY init failed: status=%d\n", status); ASSERT(0); + + if (status != DMUB_STATUS_TIMEOUT) + /* + * Server likely initialized or we don't have + * DMCUB HW support - this won't end. + */ + break; + + /* Continue spinning so we don't hang the ASIC. */ } } -- cgit From f6fe4053b91fab916eaf69de48a35f6acc598140 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Mon, 4 Nov 2019 17:44:23 -0500 Subject: drm/amd/display: Use a temporary copy of the current state when updating DSC config [why] When updating DSC config, a new config has to be validated before proceeding with applying the update. Validation, however, modifies the current state. This means DSC config validation would affect pipe re-assignment, causing intermittent screen corruption issues when ODM is required for DSC. 
[how] - Use a copy of the current state for modified DSC config validation - Set the update type to FULL_UPDATE to correctly validate and set the actual state used for committing the streams Signed-off-by: Nikola Cornij Reviewed-by: Dmytro Laktyushkin Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 61dd373b4715..c7db4f4810c6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1677,6 +1677,9 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; + + if (stream_update->dsc_config) + overall_type = UPDATE_TYPE_FULL; } for (i = 0 ; i < surface_count; i++) { @@ -1868,8 +1871,10 @@ static void copy_surface_update_to_plane( static void copy_stream_update_to_stream(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, - const struct dc_stream_update *update) + struct dc_stream_update *update) { + struct dc_context *dc_ctx = dc->ctx; + if (update == NULL || stream == NULL) return; @@ -1946,12 +1951,24 @@ static void copy_stream_update_to_stream(struct dc *dc, uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && update->dsc_config->num_slices_v != 0); - stream->timing.dsc_cfg = *update->dsc_config; - stream->timing.flags.DSC = enable_dsc; - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, - true)) { - stream->timing.dsc_cfg = old_dsc_cfg; - stream->timing.flags.DSC = old_dsc_enabled; + /* Use temporarry context for validating new DSC config */ + struct dc_state *dsc_validate_context = dc_create_state(dc); + + if (dsc_validate_context) { + dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); + + stream->timing.dsc_cfg = *update->dsc_config; + stream->timing.flags.DSC = enable_dsc; + if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { + stream->timing.dsc_cfg = old_dsc_cfg; + stream->timing.flags.DSC = old_dsc_enabled; + update->dsc_config = false; + } + + dc_release_state(dsc_validate_context); + } else { + DC_ERROR("Failed to allocate new validate context for DSC change\n"); + update->dsc_config = false; } } } -- cgit From e8cfbdac9a07c4faad60ef9bc82c53079faf4557 Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Thu, 3 Oct 2019 17:35:32 -0400 Subject: drm/amd/display: Add DSC 422Native debug option [Why] Need to be able to enable native 422 for debugging purposes. [How] Add new dc_debug_options bool and check it in the get_dsc_enc_caps function. 
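[Example]
A minimal sketch, not part of the diff below, showing how the new bit is
consumed. The helper name and parameter types here are assumptions taken
from the surrounding DSC code; debug.native422_support and the
YCBCR_NATIVE_422 capability bit are the ones this patch adds to
get_dsc_enc_caps():

    static void get_dsc_enc_caps_sketch(struct display_stream_compressor *dsc,
                                        struct dsc_enc_caps *dsc_enc_caps,
                                        int pixel_clock_100Hz)
    {
            /* Static HW query first, as in the existing code. */
            memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));

            if (!dsc)
                    return;

            dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);

            /* Debug override: also advertise native 4:2:2 support. */
            if (dsc->ctx->dc->debug.native422_support)
                    dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
    }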
Signed-off-by: Ilya Bakoulin Reviewed-by: Charlene Liu Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index f30c77e44bb4..3e6133f8cdc4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -362,6 +362,7 @@ struct dc_debug_options { bool disable_hubp_power_gate; bool disable_dsc_power_gate; int dsc_min_slice_height_override; + bool native422_support; bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; unsigned int min_disp_clk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index dabd3b7a4cdc..ec86ba73a039 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -25,6 +25,7 @@ #include "dc_hw_types.h" #include "dsc.h" #include +#include "dc.h" struct dc_dsc_policy { bool use_min_slices_h; @@ -236,8 +237,11 @@ static void get_dsc_enc_caps( // This is a static HW query, so we can use any DSC memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (dsc) + if (dsc) { dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + if (dsc->ctx->dc->debug.native422_support) + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; + } } /* Returns 'false' if no intersection was found for at least one capablity. -- cgit From f020220e16a8c0a99eec2016d754e5aca1c6d947 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 4 Nov 2019 15:36:16 -0500 Subject: drm/amd/display: Add Navi10 DMUB VBIOS code [Why] We need some extra dmub_cmd_type for NV10 [How] Add command table functions in DMUB firmware. 
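[Example]
A hypothetical helper, for illustration only; the request enum and the
function below are invented for this sketch, and only the DMUB_CMD__*
enumerators come from this patch:

    enum disppath_request {
            DISPPATH_ENABLE,
            DISPPATH_DISABLE,
            DISPPATH_DISABLE_OUTPUT,
    };

    /* Map a display-path request onto the new Navi10 VBIOS command IDs. */
    static enum dmub_cmd_type disppath_request_to_cmd(enum disppath_request req)
    {
            switch (req) {
            case DISPPATH_ENABLE:
                    return DMUB_CMD__ENABLE_DISPPATH;
            case DISPPATH_DISABLE:
                    return DMUB_CMD__DISABLE_DISPPATH;
            case DISPPATH_DISABLE_OUTPUT:
            default:
                    return DMUB_CMD__DISABLE_DISPPATH_OUTPUT;
            }
    }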
Signed-off-by: Nicholas Kazlauskas Signed-off-by: Xiong Yan Reviewed-by: Tony Cheng Acked-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index b25f92e3280d..43f1cd647aab 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -45,6 +45,17 @@ enum dmub_cmd_type { DMUB_CMD__ENABLE_DISP_POWER_GATING, DMUB_CMD__DPPHY_INIT, DMUB_CMD__DIG1_TRANSMITTER_CONTROL, + DMUB_CMD__SETUP_DISPLAY_MODE, + DMUB_CMD__BLANK_CRTC, + DMUB_CMD__ENABLE_DISPPATH, + DMUB_CMD__DISABLE_DISPPATH, + DMUB_CMD__DISABLE_DISPPATH_OUTPUT, + DMUB_CMD__READ_DISPPATH_EDID, + DMUB_CMD__DP_PRE_LINKTRAINING, + DMUB_CMD__INIT_CONTROLLER, + DMUB_CMD__RESET_CONTROLLER, + DMUB_CMD__SET_BRI_LEVEL, + DMUB_CMD__LVTMA_CONTROL, // PSR DMUB_CMD__PSR_ENABLE, -- cgit From 11a00965d26156dbcb3fdaac6f9b00ef5e67cc87 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 18 Sep 2019 11:19:51 -0400 Subject: drm/amd/display: Add PSP block to verify HDCP2.2 steps [Why] All the HDCP transactions should be verified using PSP [How] This patch adds the psp calls we need to verify the steps Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 44 ++ .../gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 502 ++++++++++++++++++++- .../gpu/drm/amd/display/modules/hdcp/hdcp_psp.h | 194 ++++++++ 3 files changed, 739 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index 5664bc0b5bd0..d83f0ab1cadb 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -111,8 +111,33 @@ struct mod_hdcp_message_hdcp1 { uint16_t binfo_dp; }; +struct mod_hdcp_message_hdcp2 { + uint8_t hdcp2version_hdmi; + uint8_t rxcaps_dp[3]; + uint16_t rxstatus; + + uint8_t ake_init[12]; + uint8_t ake_cert[534]; + uint8_t ake_no_stored_km[129]; + uint8_t ake_stored_km[33]; + uint8_t ake_h_prime[33]; + uint8_t ake_pairing_info[17]; + uint8_t lc_init[9]; + uint8_t lc_l_prime[33]; + uint8_t ske_eks[25]; + uint8_t rx_id_list[177]; // 22 + 5 * 31 + uint16_t rx_id_list_size; + uint8_t repeater_auth_ack[17]; + uint8_t repeater_auth_stream_manage[68]; // 6 + 2 * 31 + uint16_t stream_manage_size; + uint8_t repeater_auth_stream_ready[33]; + + uint8_t content_stream_type_dp[2]; +}; + union mod_hdcp_message { struct mod_hdcp_message_hdcp1 hdcp1; + struct mod_hdcp_message_hdcp2 hdcp2; }; struct mod_hdcp_auth_counters { @@ -234,6 +259,25 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption( enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, enum mod_hdcp_encryption_status *encryption_status); +enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp); +enum 
mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status); + /* ddc functions */ enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 646d909bbc37..ddba0cfa5722 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -31,6 +31,19 @@ #include "amdgpu.h" #include "hdcp_psp.h" +static void hdcp2_message_init(struct mod_hdcp *hdcp, + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *in) +{ + in->session_handle = hdcp->auth.id; + in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg1_desc.msg_size = 0; + in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg2_desc.msg_size = 0; + in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg3_desc.msg_id = 0; +} enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) { @@ -42,7 +55,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) { + if (is_display_added(&(hdcp->connection.displays[i]))) { memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -326,3 +339,490 @@ enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp * return MOD_HDCP_STATUS_SUCCESS; } +enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + + if (!psp->hdcp_context.hdcp_initialized) { + DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized"); + return MOD_HDCP_STATUS_FAILURE; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; + + if (hdcp->connection.link.adjust.hdcp2.disable_type1) + hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0; + else + hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED; + + hdcp_cmd->cmd_id = 
TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE; + + hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_INIT; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE; + + memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ake_init)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT; + msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT; + + memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert, + sizeof(hdcp->auth.msg.hdcp2.ake_cert)); + + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM; + msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; + + memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, 
&msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); + + memcpy(hdcp->auth.msg.hdcp2.ake_stored_km, + &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)], + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); + + if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { + hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; + hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0; + return MOD_HDCP_STATUS_SUCCESS; + } + + return MOD_HDCP_STATUS_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME; + msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME; + + memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_h_prime, + sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); + + if (!hdcp->connection.is_km_stored) { + msg_in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO; + msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO; + memcpy(&msg_in->process.receiver_message[sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)], + hdcp->auth.msg.hdcp2.ake_pairing_info, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); + } + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; + + if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; + else if (!hdcp->connection.is_km_stored && + msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE; + + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__LC_INIT; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return 
MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE; + + memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.lc_init)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME; + msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME; + + memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.lc_l_prime, + sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE; + + if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS; + + if (is_dp_hdcp(hdcp)) + msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE; + + memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ske_eks)); + msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks); + + if (is_dp_hdcp(hdcp)) { + memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp, + &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)], + sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + struct mod_hdcp_display *display = 
get_first_added_display(hdcp); + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + + hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION; + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE; + + if (!is_dp_mst_hdcp(hdcp)) { + display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST; + msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list); + memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.rx_id_list, + sizeof(hdcp->auth.msg.hdcp2.rx_id_list)); + + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; + + memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); + + if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { + hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; + hdcp->connection.is_repeater = msg_out->process.is_repeater ? 
1 : 0; + return MOD_HDCP_STATUS_SUCCESS; + } + + + return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + uint8_t i; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->connection.displays[i].adjust.disable) + continue; + hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION; + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + break; + + hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + } + + return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE; + + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE; + + hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size; + + memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = 
&hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY; + + msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready); + + memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + return (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) && + (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) + ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id; + hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS; + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_FAILURE; + + if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) { + if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1) + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON; + else + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON; + } + + return MOD_HDCP_STATUS_SUCCESS; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h index 986fc07ea9ea..82a5e997d573 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -36,6 +36,11 @@ enum bgd_security_hdcp_encryption_level { HDCP_ENCRYPTION_LEVEL__ON }; +enum bgd_security_hdcp2_content_type { + HDCP2_CONTENT_TYPE__INVALID = 0, + HDCP2_CONTENT_TYPE__TYPE0, + HDCP2_CONTENT_TYPE__TYPE1 +}; enum ta_dtm_command { TA_DTM_COMMAND__UNUSED_1 = 1, TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2, @@ -121,8 +126,64 @@ enum ta_hdcp_command { TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION, TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION, TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS, + TA_HDCP_COMMAND__UNUSED_1, + TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION, + TA_HDCP_COMMAND__UNUSED_2, + TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION, + TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS, + TA_HDCP_COMMAND__UNUSED_3, + TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2, + TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2, + TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION +}; + +enum ta_hdcp2_msg_id { + TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE = 1, + TA_HDCP_HDCP2_MSG_ID__AKE_INIT = 2, + TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT = 3, + TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM = 4, + TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM = 5, + TA_HDCP_HDCP2_MSG_ID__AKE_SEND_RRX = 6, + TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME = 7, + TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO = 8, + TA_HDCP_HDCP2_MSG_ID__LC_INIT = 9, + 
TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME = 10, + TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS = 11, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST = 12, + TA_HDCP_HDCP2_MSG_ID__RTT_READY = 13, + TA_HDCP_HDCP2_MSG_ID__RTT_CHALLENGE = 14, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK = 15, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE = 16, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY = 17, + TA_HDCP_HDCP2_MSG_ID__RECEIVER_AUTH_STATUS = 18, + TA_HDCP_HDCP2_MSG_ID__AKE_TRANSMITTER_INFO = 19, + TA_HDCP_HDCP2_MSG_ID__AKE_RECEIVER_INFO = 20, + TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP = 129 }; +enum ta_hdcp2_hdcp2_msg_id_max_size { + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__NULL_MESSAGE = 0, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_INIT = 12, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT = 534, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM = 129, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_RRX = 9, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO = 17, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_INIT = 9, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SKE_SEND_EKS = 25, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RECEIVERID_LIST = 181, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_READY = 1, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_CHALLENGE = 17, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RACK = 17, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_MANAGE = 13, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_READY = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RECEIVER_AUTH_STATUS = 4, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_TRANSMITTER_INFO = 6, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO = 6, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SIGNAL_CONTENT_STREAM_TYPE_DP = 1 +}; /* HDCP related enumerations */ /**********************************************************/ @@ -131,6 +192,12 @@ enum ta_hdcp_command { #define TA_HDCP__HDCP1_KSV_SIZE 5 #define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 #define TA_HDCP__HDCP1_V_PRIME_SIZE 20 +#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \ + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6 + +// 64 bits boundaries +#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \ + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4 enum ta_hdcp_status { TA_HDCP_STATUS__SUCCESS = 0x00, @@ -165,9 +232,47 @@ enum ta_hdcp_authentication_status { TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02, TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03, TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04, + TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06, + TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07, + TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08, TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09 }; +enum ta_hdcp2_msg_authentication_status { + TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS = 0, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__KM_NOT_AVAILABLE, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNUSED, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID = 100, // everything above does not fail the request + TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_ENOUGH_MEMORY, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_EXPECTED_MSG, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__SIGNATURE_CERTIFICAT_ERROR, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INCORRECT_HDCP_VERSION, + 
TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNKNOWN_MESSAGE, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_HMAC, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_TOPOLOGY, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST +}; + +enum ta_hdcp_content_type { + TA_HDCP2_CONTENT_TYPE__TYPE0 = 1, + TA_HDCP2_CONTENT_TYPE__TYPE1, +}; + +enum ta_hdcp_content_type_negotiation_type { + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0 = 1, + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1, + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED +}; + +enum ta_hdcp2_version { + TA_HDCP2_VERSION_UNKNOWN = 0, + TA_HDCP2_VERSION_2_0 = 20, + TA_HDCP2_VERSION_2_1 = 21, + TA_HDCP2_VERSION_2_2 = 22 +}; /* input/output structures for HDCP commands */ /**********************************************************/ @@ -232,6 +337,84 @@ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output { uint32_t protection_level; }; +struct ta_hdcp_cmd_hdcp2_create_session_input_v2 { + uint32_t display_handle; + enum ta_hdcp_content_type_negotiation_type negotiate_content_type; +}; + +struct ta_hdcp_cmd_hdcp2_create_session_output_v2 { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_destroy_session_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_authentication_message_v2 { + enum ta_hdcp2_msg_id msg_id; + uint32_t msg_size; +}; + +struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 { + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg3_desc; + uint8_t receiver_message[TA_HDCP__HDCP2_RX_BUF_MAX_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 { + uint32_t hdcp_version; + uint32_t is_km_stored; + uint32_t is_locality_precompute_support; + uint32_t is_repeater; + enum ta_hdcp2_msg_authentication_status msg1_status; + enum ta_hdcp2_msg_authentication_status msg2_status; + enum ta_hdcp2_msg_authentication_status msg3_status; +}; + +struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 { + enum ta_hdcp2_msg_id msg1_id; + enum ta_hdcp2_msg_id msg2_id; +}; + +struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 { + enum ta_hdcp2_msg_authentication_status msg1_status; + enum ta_hdcp2_msg_authentication_status msg2_status; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc; + uint8_t transmitter_message[TA_HDCP__HDCP2_TX_BUF_MAX_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 { + uint32_t session_handle; + struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 process; + struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 prepare; +}; + +struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 { + uint32_t authentication_status; + struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 process; + struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 prepare; +}; + +struct ta_hdcp_cmd_hdcp2_set_encryption_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_get_encryption_status_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_get_encryption_status_output { + enum ta_hdcp_content_type hdcp2_type; + uint32_t protection_level; +}; + +struct 
ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input { + uint32_t session_handle; + uint32_t display_handle; +}; + /**********************************************************/ /* Common input structure for HDCP callbacks */ union ta_hdcp_cmd_input { @@ -242,6 +425,13 @@ union ta_hdcp_cmd_input { struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption; struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption; struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_destroy_session_input hdcp2_destroy_session; + struct ta_hdcp_cmd_hdcp2_set_encryption_input hdcp2_set_encryption; + struct ta_hdcp_cmd_hdcp2_get_encryption_status_input hdcp2_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_create_session_input_v2 hdcp2_create_session_v2; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 + hdcp2_prepare_process_authentication_message_v2; + struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption; }; /* Common output structure for HDCP callbacks */ @@ -250,6 +440,10 @@ union ta_hdcp_cmd_output { struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication; struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication; struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_get_encryption_status_output hdcp2_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 + hdcp2_prepare_process_authentication_message_v2; }; /**********************************************************/ -- cgit From eff682f83c9c2030761e7536c5d97e1b20f71c15 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 18 Sep 2019 11:23:07 -0400 Subject: drm/amd/display: Add DDC handles for HDCP2.2 [Why] We need these to read and write to aux/i2c, during authentication [How] Create read/write functions for all the steps (Eg, h_prime, paring_info etc) Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 326 +++++++++++++++++++++ 1 file changed, 326 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index e7baae059b85..8059aff9911f 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -51,6 +51,26 @@ enum mod_hdcp_ddc_message_id { MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, MOD_HDCP_MESSAGE_ID_READ_BINFO, + /* HDCP 2.2 */ + + MOD_HDCP_MESSAGE_ID_HDCP2VERSION, + MOD_HDCP_MESSAGE_ID_RX_CAPS, + MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, + MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, + MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, + 
MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + MOD_HDCP_MESSAGE_ID_MAX }; @@ -70,6 +90,22 @@ static const uint8_t hdcp_i2c_offsets[] = { [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, + [MOD_HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, + [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70, + [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0 }; static const uint32_t hdcp_dpcd_addrs[] = { @@ -88,6 +124,22 @@ static const uint32_t hdcp_dpcd_addrs[] = { [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, + [MOD_HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0, + [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0, + [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, + [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, + [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493, + [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494 }; static enum mod_hdcp_status read(struct mod_hdcp *hdcp, @@ -303,3 +355,277 @@ enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp) hdcp->auth.msg.hdcp1.an, sizeof(hdcp->auth.msg.hdcp1.an)); } + +enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = MOD_HDCP_STATUS_INVALID_OPERATION; + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_HDCP2VERSION, + &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, + sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); + + return status; +} + +enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (!is_dp_hdcp(hdcp)) + status = MOD_HDCP_STATUS_INVALID_OPERATION; + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_RX_CAPS, + hdcp->auth.msg.hdcp2.rxcaps_dp, + sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); + + return status; +} + +enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, + 1); + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, + (uint8_t 
*)&hdcp->auth.msg.hdcp2.rxstatus, + sizeof(hdcp->auth.msg.hdcp2.rxstatus)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.ake_cert[0] = 3; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + hdcp->auth.msg.hdcp2.ake_cert+1, + sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + hdcp->auth.msg.hdcp2.ake_cert, + sizeof(hdcp->auth.msg.hdcp2.ake_cert)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + hdcp->auth.msg.hdcp2.ake_h_prime+1, + sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + hdcp->auth.msg.hdcp2.ake_h_prime, + sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + hdcp->auth.msg.hdcp2.ake_pairing_info+1, + sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + hdcp->auth.msg.hdcp2.ake_pairing_info, + sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + hdcp->auth.msg.hdcp2.lc_l_prime+1, + sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + hdcp->auth.msg.hdcp2.lc_l_prime, + sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.rx_id_list[0] = 12; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + hdcp->auth.msg.hdcp2.rx_id_list+1, + sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + hdcp->auth.msg.hdcp2.rx_id_list, + hdcp->auth.msg.hdcp2.rx_id_list_size); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, + hdcp->auth.msg.hdcp2.ake_init+1, + 
sizeof(hdcp->auth.msg.hdcp2.ake_init)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, + hdcp->auth.msg.hdcp2.ake_init, + sizeof(hdcp->auth.msg.hdcp2.ake_init)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + hdcp->auth.msg.hdcp2.ake_no_stored_km+1, + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + hdcp->auth.msg.hdcp2.ake_no_stored_km, + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + hdcp->auth.msg.hdcp2.ake_stored_km+1, + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + hdcp->auth.msg.hdcp2.ake_stored_km, + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, + hdcp->auth.msg.hdcp2.lc_init+1, + sizeof(hdcp->auth.msg.hdcp2.lc_init)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, + hdcp->auth.msg.hdcp2.lc_init, + sizeof(hdcp->auth.msg.hdcp2.lc_init)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + hdcp->auth.msg.hdcp2.ske_eks+1, + sizeof(hdcp->auth.msg.hdcp2.ske_eks)-1); + else + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + hdcp->auth.msg.hdcp2.ske_eks, + sizeof(hdcp->auth.msg.hdcp2.ske_eks)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + hdcp->auth.msg.hdcp2.repeater_auth_ack+1, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + hdcp->auth.msg.hdcp2.repeater_auth_ack, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + hdcp->auth.msg.hdcp2.repeater_auth_stream_manage+1, + hdcp->auth.msg.hdcp2.stream_manage_size-1); + else + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, + hdcp->auth.msg.hdcp2.stream_manage_size); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + hdcp->auth.msg.hdcp2.content_stream_type_dp+1, + sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)-1); + else + status = MOD_HDCP_STATUS_INVALID_OPERATION; + return status; +} -- cgit From 51466b3fd2725bfb0de629f71c0854ff276d50ae Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 18 Sep 2019 11:18:15 -0400 
Subject: drm/amd/display: Add execution and transition states for HDCP2.2 The module works like a state machine +-------------+ ------> | Execution.c | ------ | +-------------+ | | V +----+ +--------+ +--------------+ | DM | -----> | Hdcp.c | <------------ | Transition.c | +----+ <----- +--------+ +--------------+ This patch adds the execution and transition files for 2.2 Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/Makefile | 3 +- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 86 +- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 127 +++ .../drm/amd/display/modules/hdcp/hdcp2_execution.c | 881 +++++++++++++++++++++ .../amd/display/modules/hdcp/hdcp2_transition.c | 674 ++++++++++++++++ drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 2 + 6 files changed, 1764 insertions(+), 9 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c create mode 100644 drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile index 1c3c6d47973a..904424da01b5 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile +++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile @@ -24,7 +24,8 @@ # HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \ - hdcp1_execution.o hdcp1_transition.o + hdcp1_execution.o hdcp1_transition.o \ + hdcp2_execution.o hdcp2_transition.o AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP)) #$(info ************ DAL-HDCP_MAKEFILE ************) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index d7ac445dec6f..a74812977963 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -37,24 +37,52 @@ static void push_error_status(struct mod_hdcp *hdcp, HDCP_ERROR_TRACE(hdcp, status); } - hdcp->connection.hdcp1_retry_count++; + if (is_hdcp1(hdcp)) { + hdcp->connection.hdcp1_retry_count++; + } else if (is_hdcp2(hdcp)) { + hdcp->connection.hdcp2_retry_count++; + } } static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) { - int i, display_enabled = 0; + int i, is_auth_needed = 0; - /* if all displays on the link are disabled, hdcp is not desired */ + /* if all displays on the link don't need authentication, + * hdcp is not desired + */ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && !hdcp->connection.displays[i].adjust.disable) { - display_enabled = 1; + is_auth_needed = 1; break; } } return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && - display_enabled && !hdcp->connection.link.adjust.hdcp1.disable; + is_auth_needed && + !hdcp->connection.link.adjust.hdcp1.disable; +} + +static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) +{ + int i, is_auth_needed = 0; + + /* if all displays on the link don't need authentication, + * hdcp is not desired + */ + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->connection.displays[i].adjust.disable) { + is_auth_needed = 1; + break; + } + } + + return (hdcp->connection.hdcp2_retry_count < MAX_NUM_OF_ATTEMPTS) && + is_auth_needed && + !hdcp->connection.link.adjust.hdcp2.disable && + !hdcp->connection.is_hdcp2_revoked; } static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, @@ -82,6 
+110,11 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, } else if (is_in_hdcp1_dp_states(hdcp)) { status = mod_hdcp_hdcp1_dp_execution(hdcp, event_ctx, &input->hdcp1); + } else if (is_in_hdcp2_states(hdcp)) { + status = mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2); + } else if (is_in_hdcp2_dp_states(hdcp)) { + status = mod_hdcp_hdcp2_dp_execution(hdcp, + event_ctx, &input->hdcp2); } out: return status; @@ -99,7 +132,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp, if (is_in_initialized_state(hdcp)) { if (is_dp_hdcp(hdcp)) - if (is_cp_desired_hdcp1(hdcp)) { + if (is_cp_desired_hdcp2(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A0_DETERMINE_RX_HDCP_CAPABLE); + } else if (is_cp_desired_hdcp1(hdcp)) { callback_in_ms(0, output); set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE); } else { @@ -107,7 +143,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp, set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); } else if (is_hdmi_dvi_sl_hdcp(hdcp)) - if (is_cp_desired_hdcp1(hdcp)) { + if (is_cp_desired_hdcp2(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A0_KNOWN_HDCP2_CAPABLE_RX); + } else if (is_cp_desired_hdcp1(hdcp)) { callback_in_ms(0, output); set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX); } else { @@ -126,6 +165,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp, } else if (is_in_hdcp1_dp_states(hdcp)) { status = mod_hdcp_hdcp1_dp_transition(hdcp, event_ctx, &input->hdcp1, output); + } else if (is_in_hdcp2_states(hdcp)) { + status = mod_hdcp_hdcp2_transition(hdcp, + event_ctx, &input->hdcp2, output); + } else if (is_in_hdcp2_dp_states(hdcp)) { + status = mod_hdcp_hdcp2_dp_transition(hdcp, + event_ctx, &input->hdcp2, output); } else { status = MOD_HDCP_STATUS_INVALID_STATE; } @@ -139,9 +184,13 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; if (is_hdcp1(hdcp)) { - if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) + if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) { + /* TODO - update psp to unify create session failure + * recovery between hdcp1 and 2. 
+ */ mod_hdcp_hdcp1_destroy_session(hdcp); + } if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) { status = mod_hdcp_remove_display_topology(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) { @@ -154,6 +203,27 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); set_state_id(hdcp, output, HDCP_INITIALIZED); + } else if (is_hdcp2(hdcp)) { + if (hdcp->auth.trans_input.hdcp2.create_session == PASS) { + status = mod_hdcp_hdcp2_destroy_session(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); } else if (is_in_cp_not_desired_state(hdcp)) { status = mod_hdcp_remove_display_topology(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index d83f0ab1cadb..9887c5ea6d5f 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -44,11 +44,13 @@ #define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080 #define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800 +#define VERSION_HDCP2_MASK 0x04 #define RXSTATUS_MSG_SIZE_MASK 0x03FF #define RXSTATUS_READY_MASK 0x0400 #define RXSTATUS_REAUTH_REQUEST_MASK 0x0800 #define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0 #define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01 +#define RXCAPS_BYTE2_HDCP2_VERSION_DP 0x02 #define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02 #define RXSTATUS_READY_MASK_DP 0x0001 #define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002 @@ -92,8 +94,52 @@ struct mod_hdcp_transition_input_hdcp1 { uint8_t stream_encryption_dp; }; +struct mod_hdcp_transition_input_hdcp2 { + uint8_t hdcp2version_read; + uint8_t hdcp2_capable_check; + uint8_t add_topology; + uint8_t create_session; + uint8_t ake_init_prepare; + uint8_t ake_init_write; + uint8_t rxstatus_read; + uint8_t ake_cert_available; + uint8_t ake_cert_read; + uint8_t ake_cert_validation; + uint8_t stored_km_write; + uint8_t no_stored_km_write; + uint8_t h_prime_available; + uint8_t h_prime_read; + uint8_t pairing_available; + uint8_t pairing_info_read; + uint8_t h_prime_validation; + uint8_t lc_init_prepare; + uint8_t lc_init_write; + uint8_t l_prime_available_poll; + uint8_t l_prime_read; + uint8_t l_prime_validation; + uint8_t eks_prepare; + uint8_t eks_write; + uint8_t enable_encryption; + uint8_t reauth_request_check; + uint8_t rx_id_list_read; + uint8_t device_count_check; + uint8_t rx_id_list_validation; + uint8_t repeater_auth_ack_write; + uint8_t prepare_stream_manage; + uint8_t stream_manage_write; + uint8_t stream_ready_available; + uint8_t stream_ready_read; + uint8_t stream_ready_validation; + + uint8_t rx_caps_read_dp; + uint8_t content_stream_type_write; + uint8_t link_integrity_check_dp; + uint8_t stream_encryption_dp; +}; + union mod_hdcp_transition_input { struct mod_hdcp_transition_input_hdcp1 hdcp1; + struct mod_hdcp_transition_input_hdcp2 hdcp2; }; struct mod_hdcp_message_hdcp1 { @@ -150,8 
+196,10 @@ struct mod_hdcp_connection { struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; uint8_t is_repeater; uint8_t is_km_stored; + uint8_t is_hdcp2_revoked; struct mod_hdcp_trace trace; uint8_t hdcp1_retry_count; + uint8_t hdcp2_retry_count; }; /* contains values per authentication cycle */ @@ -219,6 +267,50 @@ enum mod_hdcp_hdcp1_dp_state_id { HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST, }; +enum mod_hdcp_hdcp2_state_id { + HDCP2_STATE_START = HDCP1_DP_STATE_END, + H2_A0_KNOWN_HDCP2_CAPABLE_RX, + H2_A1_SEND_AKE_INIT, + H2_A1_VALIDATE_AKE_CERT, + H2_A1_SEND_NO_STORED_KM, + H2_A1_READ_H_PRIME, + H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME, + H2_A1_SEND_STORED_KM, + H2_A1_VALIDATE_H_PRIME, + H2_A2_LOCALITY_CHECK, + H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER, + H2_ENABLE_ENCRYPTION, + H2_A5_AUTHENTICATED, + H2_A6_WAIT_FOR_RX_ID_LIST, + H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK, + H2_A9_SEND_STREAM_MANAGEMENT, + H2_A9_VALIDATE_STREAM_READY, + HDCP2_STATE_END = H2_A9_VALIDATE_STREAM_READY, +}; + +enum mod_hdcp_hdcp2_dp_state_id { + HDCP2_DP_STATE_START = HDCP2_STATE_END, + D2_A0_DETERMINE_RX_HDCP_CAPABLE, + D2_A1_SEND_AKE_INIT, + D2_A1_VALIDATE_AKE_CERT, + D2_A1_SEND_NO_STORED_KM, + D2_A1_READ_H_PRIME, + D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME, + D2_A1_SEND_STORED_KM, + D2_A1_VALIDATE_H_PRIME, + D2_A2_LOCALITY_CHECK, + D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER, + D2_SEND_CONTENT_STREAM_TYPE, + D2_ENABLE_ENCRYPTION, + D2_A5_AUTHENTICATED, + D2_A6_WAIT_FOR_RX_ID_LIST, + D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK, + D2_A9_SEND_STREAM_MANAGEMENT, + D2_A9_VALIDATE_STREAM_READY, + HDCP2_DP_STATE_END = D2_A9_VALIDATE_STREAM_READY, + HDCP_STATE_END = HDCP2_DP_STATE_END, +}; + /* hdcp1 executions and transitions */ typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp); uint8_t mod_hdcp_execute_and_set( @@ -239,6 +331,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, struct mod_hdcp_transition_input_hdcp1 *input, struct mod_hdcp_output *output); +/* hdcp2 executions and transitions */ +enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input); +enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input); +enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output); +enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output); + /* log functions */ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, uint8_t *buf, uint32_t buf_size); @@ -289,6 +397,7 @@ enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp); @@ -352,11 +461,28 @@ static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp) 
current_state(hdcp) <= HDCP1_DP_STATE_END); } +static inline uint8_t is_in_hdcp2_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP2_STATE_START && + current_state(hdcp) <= HDCP2_STATE_END); +} + +static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP2_DP_STATE_START && + current_state(hdcp) <= HDCP2_DP_STATE_END); +} + static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp) { return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp)); } +static inline uint8_t is_hdcp2(struct mod_hdcp *hdcp) +{ + return (is_in_hdcp2_states(hdcp) || is_in_hdcp2_dp_states(hdcp)); +} + static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp) { return current_state(hdcp) == HDCP_CP_NOT_DESIRED; @@ -481,6 +607,7 @@ static inline struct mod_hdcp_display *get_empty_display_container( static inline void reset_retry_counts(struct mod_hdcp *hdcp) { hdcp->connection.hdcp1_retry_count = 0; + hdcp->connection.hdcp2_retry_count = 0; } #endif /* HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c new file mode 100644 index 000000000000..c93c8098d972 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -0,0 +1,881 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp) +{ + uint8_t is_ready = 0; + + if (is_dp_hdcp(hdcp)) + is_ready = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_READY_MASK_DP) ? 1 : 0; + else + is_ready = ((hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_READY_MASK) && + (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK)) ? 1 : 0; + return is_ready ? MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; +} + +static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = ((hdcp->auth.msg.hdcp2.rxcaps_dp[2] & + RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP) && + (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == + RXCAPS_BYTE2_HDCP2_VERSION_DP)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + else + status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & VERSION_HDCP2_MASK) ? 
+ MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + return status; +} + +static inline enum mod_hdcp_status check_reauthentication_request( + struct mod_hdcp *hdcp) +{ + uint8_t ret = 0; + + if (is_dp_hdcp(hdcp)) + ret = (hdcp->auth.msg.hdcp2.rxstatus & + RXSTATUS_REAUTH_REQUEST_MASK_DP) ? + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : + MOD_HDCP_STATUS_SUCCESS; + else + ret = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_REAUTH_REQUEST_MASK) ? + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : + MOD_HDCP_STATUS_SUCCESS; + return ret; +} + +static inline enum mod_hdcp_status check_link_integrity_failure_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp2.rxstatus & + RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ? + MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint16_t size; + + if (is_dp_hdcp(hdcp)) { + status = MOD_HDCP_STATUS_SUCCESS; + } else { + status = mod_hdcp_read_rxstatus(hdcp); + if (status == MOD_HDCP_STATUS_SUCCESS) { + size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING; + } + } + return status; +} + +static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (is_dp_hdcp(hdcp)) { + status = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_H_P_AVAILABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; + } else { + size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; + } +out: + return status; +} + +static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (is_dp_hdcp(hdcp)) { + status = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_PAIRING_AVAILABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; + } else { + size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; + } +out: + return status; +} + +static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + uint16_t max_wait = 20000; // units of us + uint16_t num_polls = 5; + uint16_t wait_time = max_wait / num_polls; + + if (is_dp_hdcp(hdcp)) + status = MOD_HDCP_STATUS_INVALID_OPERATION; + else + for (; num_polls; num_polls--) { + udelay(wait_time); + + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + break; + + size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ? 
+ MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING; + if (status == MOD_HDCP_STATUS_SUCCESS) + break; + } + return status; +} + +static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + + if (is_dp_hdcp(hdcp)) { + status = MOD_HDCP_STATUS_INVALID_OPERATION; + } else { + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING; + } +out: + return status; +} + +static inline uint8_t get_device_count(struct mod_hdcp *hdcp) +{ + return ((hdcp->auth.msg.hdcp2.rx_id_list[2] & RXIDLIST_DEVICE_COUNT_LOWER_MASK) >> 4) + + ((hdcp->auth.msg.hdcp2.rx_id_list[1] & RXIDLIST_DEVICE_COUNT_UPPER_MASK) << 4); +} + +static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) +{ + /* device count must be greater than or equal to tracked hdcp displays */ + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static uint8_t process_rxstatus(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + enum mod_hdcp_status *status) +{ + if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxstatus, + &input->rxstatus_read, status, + hdcp, "rxstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_reauthentication_request, + &input->reauth_request_check, status, + hdcp, "reauth_request_check")) + goto out; + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(check_link_integrity_failure_dp, + &input->link_integrity_check_dp, status, + hdcp, "link_integrity_check_dp")) + goto out; + } + if (hdcp->connection.is_repeater) + if (check_receiver_id_list_ready(hdcp) == + MOD_HDCP_STATUS_SUCCESS) { + HDCP_INPUT_PASS_TRACE(hdcp, "rx_id_list_ready"); + event_ctx->rx_id_list_ready = 1; + if (is_dp_hdcp(hdcp)) + hdcp->auth.msg.hdcp2.rx_id_list_size = + sizeof(hdcp->auth.msg.hdcp2.rx_id_list); + else + hdcp->auth.msg.hdcp2.rx_id_list_size = + hdcp->auth.msg.hdcp2.rxstatus & 0x3FF; + } +out: + return (*status == MOD_HDCP_STATUS_SUCCESS); +} + +static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version, + &input->hdcp2version_read, &status, + hdcp, "hdcp2version_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp2_capable, + &input->hdcp2_capable_check, &status, + hdcp, "hdcp2_capable")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, + &input->add_topology, &status, + hdcp, "add_topology")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session, + &input->create_session, &status, + hdcp, 
"create_session")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_ake_init, + &input->ake_init_prepare, &status, + hdcp, "ake_init_prepare")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_ake_init, + &input->ake_init_write, &status, + hdcp, "ake_init_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status validate_ake_cert(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set(check_ake_cert_available, + &input->ake_cert_available, &status, + hdcp, "ake_cert_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_ake_cert, + &input->ake_cert_read, &status, + hdcp, "ake_cert_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_ake_cert, + &input->ake_cert_validation, &status, + hdcp, "ake_cert_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_no_stored_km(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_write_no_stored_km, + &input->no_stored_km_write, &status, + hdcp, "no_stored_km_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_h_prime(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(check_h_prime_available, + &input->h_prime_available, &status, + hdcp, "h_prime_available")) + goto out; + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime, + &input->h_prime_read, &status, + hdcp, "h_prime_read")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_pairing_info_and_validate_h_prime( + struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(check_pairing_info_available, + &input->pairing_available, &status, + hdcp, "pairing_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_pairing_info, + &input->pairing_info_read, &status, + hdcp, "pairing_info_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime, + &input->h_prime_validation, &status, + hdcp, "h_prime_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_stored_km(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = 
MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_write_stored_km, + &input->stored_km_write, &status, + hdcp, "stored_km_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(check_h_prime_available, + &input->h_prime_available, &status, + hdcp, "h_prime_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime, + &input->h_prime_read, &status, + hdcp, "h_prime_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime, + &input->h_prime_validation, &status, + hdcp, "h_prime_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init, + &input->lc_init_prepare, &status, + hdcp, "lc_init_prepare")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, + &input->lc_init_write, &status, + hdcp, "lc_init_write")) + goto out; + if (is_dp_hdcp(hdcp)) + udelay(16000); + else + if (!mod_hdcp_execute_and_set(poll_l_prime_available, + &input->l_prime_available_poll, &status, + hdcp, "l_prime_available_poll")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime, + &input->l_prime_read, &status, + hdcp, "l_prime_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime, + &input->l_prime_validation, &status, + hdcp, "l_prime_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status exchange_ks_and_test_for_repeater(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_eks, + &input->eks_prepare, &status, + hdcp, "eks_prepare")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_eks, + &input->eks_write, &status, + hdcp, "eks_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status enable_encryption(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) { + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) + goto out; + } + if 
(!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_enable_encryption, + &input->enable_encryption, &status, + hdcp, "enable_encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp2_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_rx_id_list(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (!event_ctx->rx_id_list_ready) { + status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status verify_rx_id_list_and_send_ack(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_rx_id_list, + &input->rx_id_list_read, + &status, hdcp, "receiver_id_list_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_device_count, + &input->device_count_check, + &status, hdcp, "device_count_check")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_rx_id_list, + &input->rx_id_list_validation, + &status, hdcp, "rx_id_list_validation")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_repeater_auth_ack, + &input->repeater_auth_ack_write, + &status, hdcp, "repeater_auth_ack_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_stream_management(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) { + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) + goto out; + } + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_stream_management, + &input->prepare_stream_manage, + &status, hdcp, "prepare_stream_manage")) + goto out; + + if (!mod_hdcp_execute_and_set(mod_hdcp_write_stream_manage, + 
&input->stream_manage_write, + &status, hdcp, "stream_manage_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) { + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) { + goto out; + } + } + if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set(check_stream_ready_available, + &input->stream_ready_available, + &status, hdcp, "stream_ready_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_stream_ready, + &input->stream_ready_read, + &status, hdcp, "stream_ready_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_stream_ready, + &input->stream_ready_validation, + &status, hdcp, "stream_ready_validation")) + goto out; + +out: + return status; +} + +static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxcaps, + &input->rx_caps_read_dp, + &status, hdcp, "rx_caps_read_dp")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp2_capable, + &input->hdcp2_capable_check, &status, + hdcp, "hdcp2_capable_check")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_content_stream_type_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_content_type, + &input->content_stream_type_write, &status, + hdcp, "content_stream_type_write")) + goto out; +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case H2_A0_KNOWN_HDCP2_CAPABLE_RX: + status = known_hdcp2_capable_rx(hdcp, event_ctx, input); + break; + case H2_A1_SEND_AKE_INIT: + status = send_ake_init(hdcp, event_ctx, input); + break; + case H2_A1_VALIDATE_AKE_CERT: + status = validate_ake_cert(hdcp, event_ctx, input); + break; + case H2_A1_SEND_NO_STORED_KM: + status = send_no_stored_km(hdcp, event_ctx, input); + break; + case H2_A1_READ_H_PRIME: + status = read_h_prime(hdcp, event_ctx, input); + break; + case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + status = read_pairing_info_and_validate_h_prime(hdcp, + event_ctx, input); + break; + case H2_A1_SEND_STORED_KM: + status = send_stored_km(hdcp, 
event_ctx, input); + break; + case H2_A1_VALIDATE_H_PRIME: + status = validate_h_prime(hdcp, event_ctx, input); + break; + case H2_A2_LOCALITY_CHECK: + status = locality_check(hdcp, event_ctx, input); + break; + case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + status = exchange_ks_and_test_for_repeater(hdcp, event_ctx, input); + break; + case H2_ENABLE_ENCRYPTION: + status = enable_encryption(hdcp, event_ctx, input); + break; + case H2_A5_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case H2_A6_WAIT_FOR_RX_ID_LIST: + status = wait_for_rx_id_list(hdcp, event_ctx, input); + break; + case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input); + break; + case H2_A9_SEND_STREAM_MANAGEMENT: + status = send_stream_management(hdcp, event_ctx, input); + break; + case H2_A9_VALIDATE_STREAM_READY: + status = validate_stream_ready(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case D2_A0_DETERMINE_RX_HDCP_CAPABLE: + status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input); + break; + case D2_A1_SEND_AKE_INIT: + status = send_ake_init(hdcp, event_ctx, input); + break; + case D2_A1_VALIDATE_AKE_CERT: + status = validate_ake_cert(hdcp, event_ctx, input); + break; + case D2_A1_SEND_NO_STORED_KM: + status = send_no_stored_km(hdcp, event_ctx, input); + break; + case D2_A1_READ_H_PRIME: + status = read_h_prime(hdcp, event_ctx, input); + break; + case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + status = read_pairing_info_and_validate_h_prime(hdcp, + event_ctx, input); + break; + case D2_A1_SEND_STORED_KM: + status = send_stored_km(hdcp, event_ctx, input); + break; + case D2_A1_VALIDATE_H_PRIME: + status = validate_h_prime(hdcp, event_ctx, input); + break; + case D2_A2_LOCALITY_CHECK: + status = locality_check(hdcp, event_ctx, input); + break; + case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + status = exchange_ks_and_test_for_repeater(hdcp, + event_ctx, input); + break; + case D2_SEND_CONTENT_STREAM_TYPE: + status = send_content_stream_type_dp(hdcp, event_ctx, input); + break; + case D2_ENABLE_ENCRYPTION: + status = enable_encryption(hdcp, event_ctx, input); + break; + case D2_A5_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case D2_A6_WAIT_FOR_RX_ID_LIST: + status = wait_for_rx_id_list(hdcp, event_ctx, input); + break; + case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input); + break; + case D2_A9_SEND_STREAM_MANAGEMENT: + status = send_stream_management(hdcp, event_ctx, input); + break; + case D2_A9_VALIDATE_STREAM_READY: + status = validate_stream_ready(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c new file mode 100644 index 000000000000..94a0e5fa931b --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -0,0 +1,674 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case H2_A0_KNOWN_HDCP2_CAPABLE_RX: + if (input->hdcp2version_read != PASS || + input->hdcp2_capable_check != PASS) { + adjust->hdcp2.disable = 1; + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_SEND_AKE_INIT); + } + break; + case H2_A1_SEND_AKE_INIT: + if (input->add_topology != PASS || + input->create_session != PASS || + input->ake_init_prepare != PASS) { + /* out of sync with psp state */ + adjust->hdcp2.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ake_init_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_VALIDATE_AKE_CERT); + break; + case H2_A1_VALIDATE_AKE_CERT: + if (input->ake_cert_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-08: consider ake timeout a failure */ + /* some hdmi receivers are not ready for HDCP + * immediately after video becomes active, + * delay 1s before retry on first HDCP message + * timeout. 
+ */ + fail_and_restart_in_ms(1000, &status, output); + } else { + /* continue ake cert polling*/ + callback_in_ms(10, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->ake_cert_read != PASS || + input->ake_cert_validation != PASS) { + /* + * 1A-09: consider invalid ake cert a failure + * 1A-10: consider receiver id listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_km_stored && + !adjust->hdcp2.force_no_stored_km) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_SEND_STORED_KM); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_SEND_NO_STORED_KM); + } + break; + case H2_A1_SEND_NO_STORED_KM: + if (input->no_stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (adjust->hdcp2.increase_h_prime_timeout) + set_watchdog_in_ms(hdcp, 2000, output); + else + set_watchdog_in_ms(hdcp, 1000, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_READ_H_PRIME); + break; + case H2_A1_READ_H_PRIME: + if (input->h_prime_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-11-3: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue h' polling */ + callback_in_ms(100, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME); + break; + case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + if (input->pairing_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-12: consider pairing info timeout + * a failure + */ + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue pairing info polling */ + callback_in_ms(20, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->pairing_info_read != PASS || + input->h_prime_validation != PASS) { + /* 1A-11-1: consider invalid h' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); + break; + case H2_A1_SEND_STORED_KM: + if (input->stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_VALIDATE_H_PRIME); + break; + case H2_A1_VALIDATE_H_PRIME: + if (input->h_prime_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-11-2: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue h' polling */ + callback_in_ms(20, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->h_prime_validation != PASS) { + /* 1A-11-1: consider invalid h' a failure */ + adjust->hdcp2.force_no_stored_km = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); + break; + case H2_A2_LOCALITY_CHECK: + if (hdcp->state.stay_count > 10 || + input->lc_init_prepare != PASS || + input->lc_init_write != PASS || + input->l_prime_available_poll != PASS || + input->l_prime_read != PASS) { + /* + * 1A-05: consider 
disconnection after LC init a failure + * 1A-13-1: consider invalid l' a failure + * 1A-13-2: consider l' timeout a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->l_prime_validation != PASS) { + callback_in_ms(0, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER); + break; + case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + if (input->eks_prepare != PASS || + input->eks_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 3000, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A6_WAIT_FOR_RX_ID_LIST); + } else { + /* some CTS equipment requires a delay GREATER than + * 200 ms, so delay 210 ms instead of 200 ms + */ + callback_in_ms(210, output); + set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION); + } + break; + case H2_ENABLE_ENCRYPTION: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + /* + * 1A-07: restart hdcp on REAUTH_REQ + * 1B-08: restart hdcp on REAUTH_REQ + */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->enable_encryption != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A5_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + case H2_A5_AUTHENTICATED: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + case H2_A6_WAIT_FOR_RX_ID_LIST: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!event_ctx->rx_id_list_ready) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-02: consider rx id list timeout a failure */ + /* some CTS equipment's actual timeout + * measurement is slightly greater than 3000 ms. + * Delay 100 ms to ensure it is fully timeout + * before re-authentication. 
+ */ + fail_and_restart_in_ms(100, &status, output); + } else { + callback_in_ms(300, output); + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->rx_id_list_read != PASS || + input->device_count_check != PASS || + input->rx_id_list_validation != PASS || + input->repeater_auth_ack_write != PASS) { + /* 1B-03: consider invalid v' a failure + * 1B-04: consider MAX_DEVS_EXCEEDED a failure + * 1B-05: consider MAX_CASCADE_EXCEEDED a failure + * 1B-06: consider invalid seq_num_V a failure + * 1B-09: consider seq_num_V rollover a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); + break; + case H2_A9_SEND_STREAM_MANAGEMENT: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->prepare_stream_manage != PASS || + input->stream_manage_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_VALIDATE_STREAM_READY); + break; + case H2_A9_VALIDATE_STREAM_READY: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->stream_ready_available != PASS) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-10-2: restart content stream management on + * stream ready timeout + */ + hdcp->auth.count.stream_management_retry_count++; + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); + } else { + callback_in_ms(10, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->stream_ready_read != PASS || + input->stream_ready_validation != PASS) { + /* + * 1B-10-1: restart content stream management + * on invalid M' + */ + if (hdcp->auth.count.stream_management_retry_count > 10) { + fail_and_restart_in_ms(0, &status, output); + } else { + hdcp->auth.count.stream_management_retry_count++; + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); + } + break; + } + callback_in_ms(200, output); + set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case D2_A0_DETERMINE_RX_HDCP_CAPABLE: + if (input->rx_caps_read_dp != PASS || + input->hdcp2_capable_check != PASS) { 
+ adjust->hdcp2.disable = 1; + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A1_SEND_AKE_INIT); + } + break; + case D2_A1_SEND_AKE_INIT: + if (input->add_topology != PASS || + input->create_session != PASS || + input->ake_init_prepare != PASS) { + /* out of sync with psp state */ + adjust->hdcp2.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ake_init_write != PASS) { + /* possibly display not ready */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(100, output); + set_state_id(hdcp, output, D2_A1_VALIDATE_AKE_CERT); + break; + case D2_A1_VALIDATE_AKE_CERT: + if (input->ake_cert_read != PASS || + input->ake_cert_validation != PASS) { + /* + * 1A-08: consider invalid ake cert a failure + * 1A-09: consider receiver id listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_km_stored && + !adjust->hdcp2.force_no_stored_km) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A1_SEND_STORED_KM); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A1_SEND_NO_STORED_KM); + } + break; + case D2_A1_SEND_NO_STORED_KM: + if (input->no_stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (adjust->hdcp2.increase_h_prime_timeout) + set_watchdog_in_ms(hdcp, 2000, output); + else + set_watchdog_in_ms(hdcp, 1000, output); + set_state_id(hdcp, output, D2_A1_READ_H_PRIME); + break; + case D2_A1_READ_H_PRIME: + if (input->h_prime_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* 1A-10-3: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + set_state_id(hdcp, output, D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME); + break; + case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + if (input->pairing_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* + * 1A-11: consider pairing info timeout + * a failure + */ + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } else if (input->pairing_info_read != PASS || + input->h_prime_validation != PASS) { + /* 1A-10-1: consider invalid h' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); + break; + case D2_A1_SEND_STORED_KM: + if (input->stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + set_state_id(hdcp, output, D2_A1_VALIDATE_H_PRIME); + break; + case D2_A1_VALIDATE_H_PRIME: + if (input->h_prime_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* 1A-10-2: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->h_prime_validation != PASS) { + /* 1A-10-1: consider invalid h' a failure */ + adjust->hdcp2.force_no_stored_km = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); + 
break; + case D2_A2_LOCALITY_CHECK: + if (hdcp->state.stay_count > 10 || + input->lc_init_prepare != PASS || + input->lc_init_write != PASS || + input->l_prime_read != PASS) { + /* 1A-12: consider invalid l' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->l_prime_validation != PASS) { + callback_in_ms(0, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER); + break; + case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + if (input->eks_prepare != PASS || + input->eks_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 3000, output); + set_state_id(hdcp, output, D2_A6_WAIT_FOR_RX_ID_LIST); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_SEND_CONTENT_STREAM_TYPE); + } + break; + case D2_SEND_CONTENT_STREAM_TYPE: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS || + input->content_stream_type_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(210, output); + set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION); + break; + case D2_ENABLE_ENCRYPTION: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + /* + * 1A-07: restart hdcp on REAUTH_REQ + * 1B-08: restart hdcp on REAUTH_REQ + */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->enable_encryption != PASS || + (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_state_id(hdcp, output, D2_A5_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + case D2_A5_AUTHENTICATED: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->link_integrity_check_dp != PASS) { + if (hdcp->connection.hdcp2_retry_count >= 1) + adjust->hdcp2.disable_type1 = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } + increment_stay_counter(hdcp); + break; + case D2_A6_WAIT_FOR_RX_ID_LIST: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!event_ctx->rx_id_list_ready) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* 1B-02: consider rx id list timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS || + input->rx_id_list_read != PASS || + input->device_count_check != PASS || + input->rx_id_list_validation != PASS || + input->repeater_auth_ack_write != PASS) { + /* + * 1B-03: consider invalid v' a failure + * 1B-04: consider 
MAX_DEVS_EXCEEDED a failure + * 1B-05: consider MAX_CASCADE_EXCEEDED a failure + * 1B-06: consider invalid seq_num_V a failure + * 1B-09: consider seq_num_V rollover a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT); + break; + case D2_A9_SEND_STREAM_MANAGEMENT: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->prepare_stream_manage != PASS || + input->stream_manage_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(100, output); + set_state_id(hdcp, output, D2_A9_VALIDATE_STREAM_READY); + break; + case D2_A9_VALIDATE_STREAM_READY: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->stream_ready_read != PASS || + input->stream_ready_validation != PASS) { + /* + * 1B-10-1: restart content stream management + * on invalid M' + * 1B-10-2: consider stream ready timeout a failure + */ + if (hdcp->auth.count.stream_management_retry_count > 10) { + fail_and_restart_in_ms(0, &status, output); + } else { + hdcp->auth.count.stream_management_retry_count++; + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT); + } + break; + } + callback_in_ms(200, output); + set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + break; + } + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index dea21702edff..97ecbf5bfec1 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -77,6 +77,7 @@ enum mod_hdcp_status { MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING, MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING, MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, + MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED, MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE, MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE, MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, @@ -86,6 +87,7 @@ enum mod_hdcp_status { MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE, MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED, MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION, MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, -- cgit From 46df9931e0db8d1f397fc7f38a13159885f8d526 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 18 Sep 2019 11:24:09 -0400 Subject: drm/amd/display: Add logging for HDCP2.2 [Why] We need to log the state changes for 2.2 This patch extends the existing logging functions to handle HDCP2.2. 
[How] We do this by adding if/else in the defines, and output the log based on the hdcp version Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/modules/hdcp/hdcp_log.c | 118 +++++++++++++++++++++ .../gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 94 ++++++++++++---- .../gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 4 + 3 files changed, 196 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c index 3982ced5f969..724ebcee9a19 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -116,6 +116,58 @@ char *mod_hdcp_status_to_str(int32_t status) return "MOD_HDCP_STATUS_DDC_FAILURE"; case MOD_HDCP_STATUS_INVALID_OPERATION: return "MOD_HDCP_STATUS_INVALID_OPERATION"; + case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE: + return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE"; + case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING: + return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING"; + case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING: + return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING: + return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED: + return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING: + return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED: + return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED"; + case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY: + return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY"; + case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION: + return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING: + return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST: + return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST"; + case 
MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE"; default: return "MOD_HDCP_STATUS_UNKNOWN"; } @@ -156,6 +208,72 @@ char *mod_hdcp_state_id_to_str(int32_t id) return "D1_A6_WAIT_FOR_READY"; case D1_A7_READ_KSV_LIST: return "D1_A7_READ_KSV_LIST"; + case H2_A0_KNOWN_HDCP2_CAPABLE_RX: + return "H2_A0_KNOWN_HDCP2_CAPABLE_RX"; + case H2_A1_SEND_AKE_INIT: + return "H2_A1_SEND_AKE_INIT"; + case H2_A1_VALIDATE_AKE_CERT: + return "H2_A1_VALIDATE_AKE_CERT"; + case H2_A1_SEND_NO_STORED_KM: + return "H2_A1_SEND_NO_STORED_KM"; + case H2_A1_READ_H_PRIME: + return "H2_A1_READ_H_PRIME"; + case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + return "H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME"; + case H2_A1_SEND_STORED_KM: + return "H2_A1_SEND_STORED_KM"; + case H2_A1_VALIDATE_H_PRIME: + return "H2_A1_VALIDATE_H_PRIME"; + case H2_A2_LOCALITY_CHECK: + return "H2_A2_LOCALITY_CHECK"; + case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + return "H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER"; + case H2_ENABLE_ENCRYPTION: + return "H2_ENABLE_ENCRYPTION"; + case H2_A5_AUTHENTICATED: + return "H2_A5_AUTHENTICATED"; + case H2_A6_WAIT_FOR_RX_ID_LIST: + return "H2_A6_WAIT_FOR_RX_ID_LIST"; + case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + return "H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK"; + case H2_A9_SEND_STREAM_MANAGEMENT: + return "H2_A9_SEND_STREAM_MANAGEMENT"; + case H2_A9_VALIDATE_STREAM_READY: + return "H2_A9_VALIDATE_STREAM_READY"; + case D2_A0_DETERMINE_RX_HDCP_CAPABLE: + return "D2_A0_DETERMINE_RX_HDCP_CAPABLE"; + case D2_A1_SEND_AKE_INIT: + return "D2_A1_SEND_AKE_INIT"; + case D2_A1_VALIDATE_AKE_CERT: + return "D2_A1_VALIDATE_AKE_CERT"; + case D2_A1_SEND_NO_STORED_KM: + return "D2_A1_SEND_NO_STORED_KM"; + case D2_A1_READ_H_PRIME: + return "D2_A1_READ_H_PRIME"; + case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + return "D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME"; + case D2_A1_SEND_STORED_KM: + return "D2_A1_SEND_STORED_KM"; + case D2_A1_VALIDATE_H_PRIME: + return "D2_A1_VALIDATE_H_PRIME"; + case D2_A2_LOCALITY_CHECK: + return "D2_A2_LOCALITY_CHECK"; + case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + return "D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER"; + case D2_SEND_CONTENT_STREAM_TYPE: + return "D2_SEND_CONTENT_STREAM_TYPE"; + case D2_ENABLE_ENCRYPTION: + return "D2_ENABLE_ENCRYPTION"; + case D2_A5_AUTHENTICATED: + return "D2_A5_AUTHENTICATED"; + case D2_A6_WAIT_FOR_RX_ID_LIST: + return "D2_A6_WAIT_FOR_RX_ID_LIST"; + case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + return "D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK"; + case D2_A9_SEND_STREAM_MANAGEMENT: + return "D2_A9_SEND_STREAM_MANAGEMENT"; + case D2_A9_VALIDATE_STREAM_READY: + return "D2_A9_VALIDATE_STREAM_READY"; default: return "UNKNOWN_STATE_ID"; }; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 2fd0e0a893ef..b29322e7d5fe 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -45,6 +45,10 @@ HDCP_LOG_VER(hdcp, \ "[Link %d] HDCP 1.4 enabled on display %d", \ hdcp->config.index, displayIndex) +#define HDCP_HDCP2_ENABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 2.2 enabled on display %d", \ + hdcp->config.index, displayIndex) /* state machine logs */ #define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \ 
HDCP_LOG_FSM(hdcp, \ @@ -93,26 +97,73 @@ hdcp->buf); \ } while (0) #define HDCP_FULL_DDC_TRACE(hdcp) do { \ - HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ - sizeof(hdcp->auth.msg.hdcp1.bksv)); \ - HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ - sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ - HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ - sizeof(hdcp->auth.msg.hdcp1.an)); \ - HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ - sizeof(hdcp->auth.msg.hdcp1.aksv)); \ - HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ - sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ - HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ - (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ - sizeof(hdcp->auth.msg.hdcp1.r0p)); \ - HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ - (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ - sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ - HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ - hdcp->auth.msg.hdcp1.ksvlist_size); \ - HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ - sizeof(hdcp->auth.msg.hdcp1.vp)); \ + if (is_hdcp1(hdcp)) { \ + HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ + sizeof(hdcp->auth.msg.hdcp1.bksv)); \ + HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ + sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ + sizeof(hdcp->auth.msg.hdcp1.an)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ + sizeof(hdcp->auth.msg.hdcp1.aksv)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ + sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ + HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ + sizeof(hdcp->auth.msg.hdcp1.r0p)); \ + HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ + HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ + hdcp->auth.msg.hdcp1.ksvlist_size); \ + HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ + sizeof(hdcp->auth.msg.hdcp1.vp)); \ + } else { \ + HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \ + &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \ + sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \ + HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \ + sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \ + sizeof(hdcp->auth.msg.hdcp2.ake_init)); \ + HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \ + sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \ + hdcp->auth.msg.hdcp2.ake_stored_km, \ + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \ + hdcp->auth.msg.hdcp2.ake_no_stored_km, \ + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \ + HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \ + sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \ + HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \ + hdcp->auth.msg.hdcp2.ake_pairing_info, \ + sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \ + sizeof(hdcp->auth.msg.hdcp2.lc_init)); \ + HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \ + sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \ + sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \ + HDCP_DDC_READ_TRACE(hdcp, "Rx Status", 
\ + (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \ + sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \ + HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \ + hdcp->auth.msg.hdcp2.rx_id_list, \ + hdcp->auth.msg.hdcp2.rx_id_list_size); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \ + hdcp->auth.msg.hdcp2.repeater_auth_ack, \ + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \ + hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \ + hdcp->auth.msg.hdcp2.stream_manage_size); \ + HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \ + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \ + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \ + hdcp->auth.msg.hdcp2.content_stream_type_dp, \ + sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \ + } \ } while (0) #define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \ HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \ @@ -123,6 +174,9 @@ #define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \ hdcp->config.index) +#define HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp2 session", \ + hdcp->config.index) #define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \ HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index) #define HDCP_TOP_RESET_CONN_TRACE(hdcp) \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index ddba0cfa5722..a365cf00bc4c 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -393,6 +393,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE; + HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); + return MOD_HDCP_STATUS_SUCCESS; } @@ -649,6 +651,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) if (!is_dp_mst_hdcp(hdcp)) { display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index); } return MOD_HDCP_STATUS_SUCCESS; @@ -727,6 +730,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp break; hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); } return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS -- cgit From 2e956df2913f3452b4d590016f5364fb7255284a Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 18 Sep 2019 11:24:39 -0400 Subject: drm/amd/display: Change ERROR to WARN for HDCP module [Why] HDCP is a bit finicky so we try it 3 times, this leads to a case where if we fail the first time and pass the second time the error is still shown in dmesg for the first failed attempt. This leads to false positive errors. [How] Change the logging from ERROR to WARNING. Warnings are still shown in dmesg to know what went wrong. 
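[Example] The retry behaviour described above is easier to see in isolation. The sketch below is a self-contained illustration only, not the driver code; try_authenticate() and AUTH_RETRY_LIMIT are invented for it. Intermediate failed attempts are reported as warnings, so a pass on a later attempt does not leave a stale error in the log, while the final failure is still reported as an error.

#include <stdbool.h>
#include <stdio.h>

#define AUTH_RETRY_LIMIT 3	/* mirrors the "try it 3 times" behaviour described above */

/* Hypothetical authentication step: fails on the first attempt, passes on the second. */
static bool try_authenticate(int attempt)
{
	return attempt >= 2;
}

int main(void)
{
	int attempt;

	for (attempt = 1; attempt <= AUTH_RETRY_LIMIT; attempt++) {
		if (try_authenticate(attempt)) {
			printf("authenticated on attempt %d\n", attempt);
			return 0;
		}
		/* A warning keeps the failed attempt visible without implying
		 * that authentication ultimately failed. */
		fprintf(stderr, "warning: attempt %d failed, retrying\n", attempt);
	}
	fprintf(stderr, "error: authentication failed after %d attempts\n", AUTH_RETRY_LIMIT);
	return 1;
}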
Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index b29322e7d5fe..ff91373ebada 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -27,7 +27,7 @@ #define MOD_HDCP_LOG_H_ #ifdef CONFIG_DRM_AMD_DC_HDCP -#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__) +#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__) #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) @@ -37,7 +37,7 @@ /* default logs */ #define HDCP_ERROR_TRACE(hdcp, status) \ HDCP_LOG_ERR(hdcp, \ - "[Link %d] ERROR %s IN STATE %s", \ + "[Link %d] WARNING %s IN STATE %s", \ hdcp->config.index, \ mod_hdcp_status_to_str(status), \ mod_hdcp_state_id_to_str(hdcp->state.id)) -- cgit From da283469e05fbe8749fbaeb4f2ad0f46e1faaeab Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Tue, 23 Jul 2019 11:25:10 -0400 Subject: drm/amd/display: Enable HDCP 2.2 [Why] HDCP 2.2 was disabled, we need to enable it [How] -Update display topology to support 2.2 -Unset hdcp2.disable in update_config -Change logic of event_update_property, now we set the property to be ENABLED for any level of encryption (2.2 or 1.4). Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 3 +-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 77181ddf6c8e..970f2d58c6dc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -190,7 +190,7 @@ static void event_property_update(struct work_struct *work) } } - if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON) + if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); else drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); @@ -294,7 +294,6 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dig_be = config->link_enc_inst; link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; - link->adjust.hdcp2.disable = 1; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index a365cf00bc4c..a9511612f426 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -109,7 +109,7 @@ enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp) dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe; dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id; dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = - TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x; + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2; 
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); -- cgit From 53e108aa9916dbbdebd3d94ce63a6928fda09899 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Fri, 16 Aug 2019 14:49:05 -0400 Subject: drm/amd/display: Handle hdcp2.2 type0/1 in dm [Why] HDCP 2.2 uses type0 and type1 content type. This is passed to the receiver to stream the proper content. For example, in an MST case if the main device is HDCP2.2 capable but the secondary device is only 1.4 capable we can use Type0 Type0 content: use HDCP 1.4 or HDCP2.2 type0 Type1 content: Only use HDCP 2.2 type1 [How] We use the "hdcp content type" property in drm. We use the disable_type1 flag in hdcp module to select the type based on the properties. For updating the property we use the same logic as 1.4, but now we consider content_type as well and update the property if the requirements are met Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 18 ++++++++++++++---- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 17 +++++++++++++---- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 4 ++-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 5 ++++- 4 files changed, 33 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 11b9c65e1ee8..d368bb8d712b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -43,6 +43,7 @@ #include "amdgpu_dm.h" #ifdef CONFIG_DRM_AMD_DC_HDCP #include "amdgpu_dm_hdcp.h" +#include <drm/drm_hdcp.h> #endif #include "amdgpu_pm.h" @@ -5581,7 +5582,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, &aconnector->base); #ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->asic_type >= CHIP_RAVEN) - drm_connector_attach_content_protection_property(&aconnector->base, false); + drm_connector_attach_content_protection_property(&aconnector->base, true); #endif } } @@ -5832,6 +5833,12 @@ static bool is_content_protection_different(struct drm_connector_state *state, { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + if (old_state->hdcp_content_type != state->hdcp_content_type && + state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + return true; + } + /* CP is being re enabled, ignore this */ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { @@ -5864,11 +5871,14 @@ static void update_content_protection(struct drm_connector_state *state, const s struct hdcp_workqueue *hdcp_w) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + bool disable_type1 = state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 ? 
true : false; - if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) - hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector); - else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + hdcp_reset_display(hdcp_w, aconnector->dc_link->link_index); + hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector, disable_type1); + } else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); + } } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 970f2d58c6dc..a2ad1390977d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -87,7 +87,8 @@ static void process_output(struct hdcp_workqueue *hdcp_work) } -void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector) +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, + bool disable_type1) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct mod_hdcp_display *display = &hdcp_work[link_index].display; @@ -96,6 +97,8 @@ void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, mutex_lock(&hdcp_w->mutex); hdcp_w->aconnector = aconnector; + hdcp_w->link.adjust.hdcp2.disable_type1 = disable_type1; + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); @@ -190,10 +193,16 @@ static void event_property_update(struct work_struct *work) } } - if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) - drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); - else + if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && + hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && + hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + } else { drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); + } mutex_unlock(&hdcp_work->mutex); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index d3ba505d0696..098f7218f83a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -54,8 +54,8 @@ struct hdcp_workqueue { uint8_t max_link; }; -void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, - struct amdgpu_dm_connector *aconnector); +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, + bool disable_type1); void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index); void hdcp_reset_display(struct 
hdcp_workqueue *work, unsigned int link_index); void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index a74812977963..0f2f242710b3 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -417,7 +417,10 @@ enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, query->trace = &hdcp->connection.trace; query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status); + if (is_hdcp1(hdcp)) + mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status); + else if (is_hdcp2(hdcp)) + mod_hdcp_hdcp2_get_link_encryption_status(hdcp, &query->encryption_status); out: return status; -- cgit From b1abe5586ffcb15cc51668bd1a0f97adfbfb0acd Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 28 Aug 2019 15:10:03 -0400 Subject: drm/amd/display: Refactor HDCP to handle multiple displays per link [Why] We need to do this to support HDCP over MST Currently we save a display per link, in a MST case we need to save multiple displays per link. [How] We can create an array per link to cache the displays, but it complicates the design. Instead we can use the module to cache the displays. Now we will always add all the displays to the module, but we use the adjustment flag to disable hdcp on all of them before they are added. When we want to enable hdcp we just query the display(cache), remove it then add it back with different adjustments. Its the similar for disable. Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 20 ++------ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 53 ++++++++++++---------- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 9 ++-- 3 files changed, 40 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d368bb8d712b..eb7eb128744c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5867,20 +5867,6 @@ static bool is_content_protection_different(struct drm_connector_state *state, return false; } -static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector, - struct hdcp_workqueue *hdcp_w) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - bool disable_type1 = state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 ? 
true : false; - - if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { - hdcp_reset_display(hdcp_w, aconnector->dc_link->link_index); - hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector, disable_type1); - } else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); - } - -} #endif static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, @@ -6850,7 +6836,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) - update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue); + hdcp_update_display( + adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, + new_con_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 ? true : false, + new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true + : false); } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index a2ad1390977d..53e382bff54d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -87,43 +87,45 @@ static void process_output(struct hdcp_workqueue *hdcp_work) } -void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - bool disable_type1) +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector, + bool disable_type1, + bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; + struct mod_hdcp_display_query query; mutex_lock(&hdcp_w->mutex); hdcp_w->aconnector = aconnector; - hdcp_w->link.adjust.hdcp2.disable_type1 = disable_type1; - - mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); - - schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); - - process_output(hdcp_w); - - mutex_unlock(&hdcp_w->mutex); - -} - -void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index) -{ - struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; - - mutex_lock(&hdcp_w->mutex); + query.display = NULL; + mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); + + if (query.display != NULL) { + memcpy(display, query.display, sizeof(struct mod_hdcp_display)); + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + + if (enable_encryption) { + display->adjust.disable = 0; + hdcp_w->link.adjust.hdcp2.disable_type1 = disable_type1; + schedule_delayed_work(&hdcp_w->property_validate_dwork, + msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + } else { + display->adjust.disable = 1; + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + cancel_delayed_work(&hdcp_w->property_validate_dwork); + } - mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output); + display->state = MOD_HDCP_DISPLAY_ACTIVE; + } - cancel_delayed_work(&hdcp_w->property_validate_dwork); - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); 
process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); - } void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) @@ -303,7 +305,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dig_be = config->link_enc_inst; link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + display->adjust.disable = 1; + link->adjust.auth_delay = 2; + hdcp_update_display(hdcp_work, link_index, aconnector, false, false); } struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 098f7218f83a..71e121f037cb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -54,9 +54,12 @@ struct hdcp_workqueue { uint8_t max_link; }; -void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - bool disable_type1); -void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index); +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector, + bool disable_type1, + bool enable_encryption); + void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); void hdcp_destroy(struct hdcp_workqueue *work); -- cgit From 23eb41917fc9f2d8d77ecd557646deb577ec8270 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Thu, 29 Aug 2019 15:26:54 -0400 Subject: drm/amd/display: add force Type0/1 flag [Why] Before we had a disable_type1 flag, this forced HDCP 2.2 to type0 There was no way to force type1. [How] Remove disable_type1 flag and instead add a flag to force type0/1. Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 12 +++++++++--- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 2 +- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 2 +- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 7 +++++-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 11 +++++++++-- 6 files changed, 26 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index eb7eb128744c..85318463b8a6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6838,7 +6838,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) hdcp_update_display( adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, - new_con_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 ? true : false, + new_con_state->hdcp_content_type, new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? 
true : false); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 53e382bff54d..244a8e80334a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -90,7 +90,7 @@ static void process_output(struct hdcp_workqueue *hdcp_work) void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - bool disable_type1, + uint8_t content_type, bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -108,9 +108,15 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, memcpy(display, query.display, sizeof(struct mod_hdcp_display)); mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + if (enable_encryption) { display->adjust.disable = 0; - hdcp_w->link.adjust.hdcp2.disable_type1 = disable_type1; + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) + hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) + hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; + schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); } else { @@ -308,7 +314,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->adjust.disable = 1; link->adjust.auth_delay = 2; - hdcp_update_display(hdcp_work, link_index, aconnector, false, false); + hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false); } struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 71e121f037cb..6abde86bce4a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -57,7 +57,7 @@ struct hdcp_workqueue { void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - bool disable_type1, + uint8_t content_type, bool enable_encryption); void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c index 94a0e5fa931b..e8043c903a84 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -570,7 +570,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, break; } else if (input->link_integrity_check_dp != PASS) { if (hdcp->connection.hdcp2_retry_count >= 1) - adjust->hdcp2.disable_type1 = 1; + adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; fail_and_restart_in_ms(0, &status, output); break; } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index a9511612f426..2dd5feec8e6c 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -358,10 +358,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = 
display->index; - if (hdcp->connection.link.adjust.hdcp2.disable_type1) + if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0; - else + else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1) + hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1; + else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_MAX) hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index 97ecbf5bfec1..ff2bb2bfbb53 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -158,12 +158,18 @@ struct mod_hdcp_link_adjustment_hdcp1 { uint8_t reserved : 6; }; +enum mod_hdcp_force_hdcp_type { + MOD_HDCP_FORCE_TYPE_MAX = 0, + MOD_HDCP_FORCE_TYPE_0, + MOD_HDCP_FORCE_TYPE_1 +}; + struct mod_hdcp_link_adjustment_hdcp2 { uint8_t disable : 1; - uint8_t disable_type1 : 1; + uint8_t force_type : 2; uint8_t force_no_stored_km : 1; uint8_t increase_h_prime_timeout: 1; - uint8_t reserved : 4; + uint8_t reserved : 3; }; struct mod_hdcp_link_adjustment { @@ -185,6 +191,7 @@ struct mod_hdcp_trace { enum mod_hdcp_encryption_status { MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0, MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON, MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON, MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON }; -- cgit From 45375a501f1387567cccd4bbd78d6bbea8d5b68c Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 4 Sep 2019 16:52:20 -0400 Subject: drm/amd/display: Refactor HDCP encryption status update [Why] The old way was to poll PSP and update the properties. But due to a limitation in the PSP interface this doesn't work for MST. [How] According to PSP if set_encryption return success, the link is encrypted and the only way it will not be is if we get a link loss(which we handle already). So this method should be good enough to report HDCP status. 
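[Example] A reduced standalone sketch of the reporting model described above: encryption status is derived from cached state (whether the last set_encryption call succeeded, plus the requested content type) instead of being polled from the PSP. The enum values and struct here are simplified stand-ins for the mod_hdcp types, not the real definitions; only the mapping logic mirrors the patch below.

#include <stdio.h>

enum encryption_status {
	ENC_OFF = 0,
	ENC_HDCP1_ON,
	ENC_HDCP2_TYPE0_ON,
	ENC_HDCP2_TYPE1_ON,
	ENC_HDCP2_ON,
};

enum force_type { FORCE_TYPE_MAX = 0, FORCE_TYPE_0, FORCE_TYPE_1 };

struct link_state {
	int encryption_enabled;		/* cached result of the last set_encryption call */
	int is_hdcp2;			/* 0 = HDCP 1.4 session, 1 = HDCP 2.2 session */
	enum force_type force_type;	/* content type requested by the display manager */
};

/* Map the cached state to a reported status without another PSP query. */
static enum encryption_status query_status(const struct link_state *s)
{
	if (!s->encryption_enabled)
		return ENC_OFF;
	if (!s->is_hdcp2)
		return ENC_HDCP1_ON;
	if (s->force_type == FORCE_TYPE_0)
		return ENC_HDCP2_TYPE0_ON;
	if (s->force_type == FORCE_TYPE_1)
		return ENC_HDCP2_TYPE1_ON;
	return ENC_HDCP2_ON;
}

int main(void)
{
	struct link_state s = { 1, 1, FORCE_TYPE_1 };

	printf("reported status: %d\n", query_status(&s));
	return 0;
}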
Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 3 +-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 18 ++++++++++++++---- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 4 ++-- 3 files changed, 17 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 244a8e80334a..f6864a51891a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -85,6 +85,7 @@ static void process_output(struct hdcp_workqueue *hdcp_work) schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, msecs_to_jiffies(output.watchdog_timer_delay)); + schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0)); } void hdcp_update_display(struct hdcp_workqueue *hdcp_work, @@ -234,8 +235,6 @@ static void event_property_validate(struct work_struct *work) schedule_work(&hdcp_work->property_update_work); } - schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); - mutex_unlock(&hdcp_work->mutex); } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index 0f2f242710b3..cbb5e9c063ec 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -417,10 +417,20 @@ enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, query->trace = &hdcp->connection.trace; query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - if (is_hdcp1(hdcp)) - mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status); - else if (is_hdcp2(hdcp)) - mod_hdcp_hdcp2_get_link_encryption_status(hdcp, &query->encryption_status); + if (is_display_encryption_enabled(display)) { + if (is_hdcp1(hdcp)) { + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON; + } else if (is_hdcp2(hdcp)) { + if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON; + else if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1) + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON; + else + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON; + } + } else { + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } out: return status; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index ff2bb2bfbb53..f2a0e1a064da 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -191,9 +191,9 @@ struct mod_hdcp_trace { enum mod_hdcp_encryption_status { MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0, MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON, - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON, MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON, - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON }; /* per link events dm has to notify to hdcp module */ -- cgit From 02837a91ae7584e2ac13df816bd0e82bc71ebe73 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Mon, 21 Oct 2019 14:40:55 -0400 Subject: drm/amd/display: add and use defines from drm_hdcp.h [Why] These defines/macros exist already no need to redefine them [How] Use the defines/macros from 
drm_hdcp.h -we share the rxstatus between HDMI and DP (2 bytes), But upstream defines/macros for HDMI are for 1 byte. So we need to create a separate rxstatus for HDMI Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 40 +++++++++------------- .../drm/amd/display/modules/hdcp/hdcp1_execution.c | 22 ++++++------ .../drm/amd/display/modules/hdcp/hdcp2_execution.c | 24 ++++++------- 3 files changed, 37 insertions(+), 49 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index 9887c5ea6d5f..bfb32afc1868 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -29,34 +29,28 @@ #include "mod_hdcp.h" #include "hdcp_log.h" -#define BCAPS_READY_MASK 0x20 -#define BCAPS_REPEATER_MASK 0x40 -#define BSTATUS_DEVICE_COUNT_MASK 0X007F -#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080 +#include +#include + +/* TODO: + * Replace below defines with these + * + * #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) + * #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) + * #define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3) + * #define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) + * #define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) + * + * Currently we share rx_status between HDMI and DP, so we use 16bits + * The upstream defines work with 1bytes at a time. So we need to + * split the HDMI rxstatus into 2bytes before we can use usptream defs + */ + #define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800 -#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01 -#define BCAPS_REPEATER_MASK_DP 0x02 -#define BSTATUS_READY_MASK_DP 0x01 -#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02 -#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04 -#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08 -#define BINFO_DEVICE_COUNT_MASK_DP 0X007F -#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080 #define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800 - -#define VERSION_HDCP2_MASK 0x04 #define RXSTATUS_MSG_SIZE_MASK 0x03FF #define RXSTATUS_READY_MASK 0x0400 #define RXSTATUS_REAUTH_REQUEST_MASK 0x0800 -#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0 -#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01 -#define RXCAPS_BYTE2_HDCP2_VERSION_DP 0x02 -#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02 -#define RXSTATUS_READY_MASK_DP 0x0001 -#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002 -#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004 -#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008 -#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010 enum mod_hdcp_trans_input_result { UNKNOWN = 0, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 3db4a7da414f..4618abd6504f 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -41,17 +41,17 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) { if (is_dp_hdcp(hdcp)) - return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ? + return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_READY) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; - return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ? + return (hdcp->auth.msg.hdcp1.bcaps & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY) ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; } static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) { - return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ? + return (hdcp->auth.msg.hdcp1.bcaps & DP_BCAPS_HDCP_CAPABLE) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE; } @@ -61,7 +61,7 @@ static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { status = (hdcp->auth.msg.hdcp1.bstatus & - BSTATUS_R0_P_AVAILABLE_MASK_DP) ? + DP_BSTATUS_R0_PRIME_READY) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING; } else { @@ -74,7 +74,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp( struct mod_hdcp *hdcp) { return (hdcp->auth.msg.hdcp1.bstatus & - BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ? + DP_BSTATUS_LINK_FAILURE) ? MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE : MOD_HDCP_STATUS_SUCCESS; } @@ -82,7 +82,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp( static inline enum mod_hdcp_status check_no_reauthentication_request_dp( struct mod_hdcp *hdcp) { - return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ? + return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_REAUTH_REQ) ? MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED : MOD_HDCP_STATUS_SUCCESS; } @@ -109,13 +109,11 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) - status = (hdcp->auth.msg.hdcp1.binfo_dp & - BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ? + status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp) ? MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : MOD_HDCP_STATUS_SUCCESS; else - status = (hdcp->auth.msg.hdcp1.bstatus & - BSTATUS_MAX_DEVS_EXCEEDED_MASK) ? + status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus) ? MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : MOD_HDCP_STATUS_SUCCESS; return status; @@ -124,8 +122,8 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp) static inline uint8_t get_device_count(struct mod_hdcp *hdcp) { return is_dp_hdcp(hdcp) ? - (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) : - (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK); + DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.binfo_dp) : + DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.bstatus); } static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index c93c8098d972..7513b3b3c353 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -30,7 +30,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp uint8_t is_ready = 0; if (is_dp_hdcp(hdcp)) - is_ready = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_READY_MASK_DP) ? 1 : 0; + is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus) ? 1 : 0; else is_ready = ((hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_READY_MASK) && (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK)) ? 1 : 0; @@ -43,14 +43,12 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) - status = ((hdcp->auth.msg.hdcp2.rxcaps_dp[2] & - RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP) && - (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == - RXCAPS_BYTE2_HDCP2_VERSION_DP)) ? 
+ status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) && + HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; else - status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & VERSION_HDCP2_MASK) ? + status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; return status; @@ -62,8 +60,7 @@ static inline enum mod_hdcp_status check_reauthentication_request( uint8_t ret = 0; if (is_dp_hdcp(hdcp)) - ret = (hdcp->auth.msg.hdcp2.rxstatus & - RXSTATUS_REAUTH_REQUEST_MASK_DP) ? + ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus) ? MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : MOD_HDCP_STATUS_SUCCESS; else @@ -76,8 +73,7 @@ static inline enum mod_hdcp_status check_reauthentication_request( static inline enum mod_hdcp_status check_link_integrity_failure_dp( struct mod_hdcp *hdcp) { - return (hdcp->auth.msg.hdcp2.rxstatus & - RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ? + return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus) ? MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE : MOD_HDCP_STATUS_SUCCESS; } @@ -111,7 +107,7 @@ static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) goto out; if (is_dp_hdcp(hdcp)) { - status = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_H_P_AVAILABLE_MASK_DP) ? + status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; } else { @@ -134,7 +130,7 @@ static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) goto out; if (is_dp_hdcp(hdcp)) { - status = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_PAIRING_AVAILABLE_MASK_DP) ? + status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; } else { @@ -197,8 +193,8 @@ out: static inline uint8_t get_device_count(struct mod_hdcp *hdcp) { - return ((hdcp->auth.msg.hdcp2.rx_id_list[2] & RXIDLIST_DEVICE_COUNT_LOWER_MASK) >> 4) + - ((hdcp->auth.msg.hdcp2.rx_id_list[1] & RXIDLIST_DEVICE_COUNT_UPPER_MASK) << 4); + return HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) + + (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4); } static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) -- cgit From 1c40428ace2eba5144b3f13f91df4a0c0422c68d Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Thu, 24 Oct 2019 16:07:43 -0400 Subject: drm/amd/display: use drm defines for MAX CASCADE MASK [Why] drm already has this define [How] drm Mask is 0x08 vs 0x0800. The reason is because drm mask works on a byte. ^^ =======|| || Since the first byte is always zero we can ignore it and only check the second byte. 
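To make the byte-shift concrete, a small illustrative snippet (not part of the
patch itself); as the TODO in hdcp.h notes, DRM_HDCP_MAX_CASCADE_EXCEEDED(x)
expands to (x & BIT(3)):

	uint16_t bstatus = 0x0800;             /* bit 11: MAX_CASCADE_EXCEEDED */
	bool old_check = bstatus & 0x0800;     /* driver-local 16-bit mask */
	bool new_check = DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus >> 8);
	                                       /* bit 3 of the upper byte */
	/* old_check and new_check always test the same bit, so shifting the
	 * 16-bit bstatus/binfo value right by 8 lets the byte-wide drm helper
	 * replace the local mask. */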
Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 4 ---- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 14 ++++++-------- 2 files changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index bfb32afc1868..f6bba487d1d4 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -35,8 +35,6 @@ /* TODO: * Replace below defines with these * - * #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) - * #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) * #define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3) * #define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) * #define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) @@ -46,8 +44,6 @@ * split the HDMI rxstatus into 2bytes before we can use usptream defs */ -#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800 -#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800 #define RXSTATUS_MSG_SIZE_MASK 0x03FF #define RXSTATUS_READY_MASK 0x0400 #define RXSTATUS_REAUTH_REQUEST_MASK 0x0800 diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 4618abd6504f..4d11041a8c6f 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -92,15 +92,13 @@ static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) - status = (hdcp->auth.msg.hdcp1.binfo_dp & - BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ? - MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : - MOD_HDCP_STATUS_SUCCESS; + status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp >> 8) + ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE + : MOD_HDCP_STATUS_SUCCESS; else - status = (hdcp->auth.msg.hdcp1.bstatus & - BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ? - MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : - MOD_HDCP_STATUS_SUCCESS; + status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus >> 8) + ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE + : MOD_HDCP_STATUS_SUCCESS; return status; } -- cgit From 302169003733f168f9de5c0d677c0cd82d1be107 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Thu, 24 Oct 2019 16:07:58 -0400 Subject: drm/amd/display: split rxstatus for hdmi and dp [Why] Currently we share rxstatus between HDMI and DP, so we use 16bits The drm defines work with 1bytes at a time. So we need to split the HDMI rxstatus into 2bytes before we can use drm defines [How] -create rxstatus for dp and hdmi. rxstatus for hdmi is split into bytes using arrays. 
-use drm_hdcp defines for the remaining structs Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 20 ++----------- .../drm/amd/display/modules/hdcp/hdcp2_execution.c | 35 +++++++++++++--------- .../gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 2 +- 3 files changed, 24 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index f6bba487d1d4..f98d3d9ecb6d 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -32,22 +32,6 @@ #include #include -/* TODO: - * Replace below defines with these - * - * #define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3) - * #define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) - * #define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) - * - * Currently we share rx_status between HDMI and DP, so we use 16bits - * The upstream defines work with 1bytes at a time. So we need to - * split the HDMI rxstatus into 2bytes before we can use usptream defs - */ - -#define RXSTATUS_MSG_SIZE_MASK 0x03FF -#define RXSTATUS_READY_MASK 0x0400 -#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800 - enum mod_hdcp_trans_input_result { UNKNOWN = 0, PASS, @@ -150,7 +134,7 @@ struct mod_hdcp_message_hdcp1 { struct mod_hdcp_message_hdcp2 { uint8_t hdcp2version_hdmi; uint8_t rxcaps_dp[3]; - uint16_t rxstatus; + uint8_t rxstatus[2]; uint8_t ake_init[12]; uint8_t ake_cert[534]; @@ -167,7 +151,7 @@ struct mod_hdcp_message_hdcp2 { uint8_t repeater_auth_stream_manage[68]; // 6 + 2 * 31 uint16_t stream_manage_size; uint8_t repeater_auth_stream_ready[33]; - + uint8_t rxstatus_dp; uint8_t content_stream_type_dp[2]; }; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 7513b3b3c353..110c8620907b 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -30,10 +30,11 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp uint8_t is_ready = 0; if (is_dp_hdcp(hdcp)) - is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus) ? 1 : 0; + is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0; else - is_ready = ((hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_READY_MASK) && - (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK)) ? 1 : 0; + is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) && + (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0; return is_ready ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; } @@ -60,11 +61,11 @@ static inline enum mod_hdcp_status check_reauthentication_request( uint8_t ret = 0; if (is_dp_hdcp(hdcp)) - ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus) ? + ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus_dp) ? MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : MOD_HDCP_STATUS_SUCCESS; else - ret = (hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_REAUTH_REQUEST_MASK) ? + ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ? 
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : MOD_HDCP_STATUS_SUCCESS; return ret; @@ -73,7 +74,7 @@ static inline enum mod_hdcp_status check_reauthentication_request( static inline enum mod_hdcp_status check_link_integrity_failure_dp( struct mod_hdcp *hdcp) { - return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus) ? + return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus_dp) ? MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE : MOD_HDCP_STATUS_SUCCESS; } @@ -88,7 +89,8 @@ static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) } else { status = mod_hdcp_read_rxstatus(hdcp); if (status == MOD_HDCP_STATUS_SUCCESS) { - size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING; @@ -107,11 +109,12 @@ static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) goto out; if (is_dp_hdcp(hdcp)) { - status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus) ? + status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus_dp) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; } else { - size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; @@ -130,11 +133,12 @@ static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) goto out; if (is_dp_hdcp(hdcp)) { - status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus) ? + status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus_dp) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; } else { - size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; @@ -161,7 +165,8 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) if (status != MOD_HDCP_STATUS_SUCCESS) break; - size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING; @@ -182,7 +187,8 @@ static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - size = hdcp->auth.msg.hdcp2.rxstatus & RXSTATUS_MSG_SIZE_MASK; + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING; @@ -234,7 +240,8 @@ static uint8_t process_rxstatus(struct mod_hdcp *hdcp, sizeof(hdcp->auth.msg.hdcp2.rx_id_list); else hdcp->auth.msg.hdcp2.rx_id_list_size = - hdcp->auth.msg.hdcp2.rxstatus & 0x3FF; + HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; } out: return (*status == MOD_HDCP_STATUS_SUCCESS); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index 8059aff9911f..ff9d54812e62 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -390,7 +390,7 @@ enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp) if (is_dp_hdcp(hdcp)) { status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, - (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, + &hdcp->auth.msg.hdcp2.rxstatus_dp, 1); } else { status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, -- cgit From a1acc5d0942c827d38b008f2c06fcb4e03dda76d Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 6 Nov 2019 14:58:45 -0500 Subject: drm/amd/display: Fix static analysis bug in validate_bksv [Why] static analysis throws the error below Out-of-bounds read (OVERRUN) Overrunning array of 5 bytes at byte offset 7 by dereferencing pointer (uint64_t *)hdcp->auth.msg.hdcp1.bksv. var n is going to contain r0p and bcaps. if they are non-zero the count will be wrong How] Use memcpy instead to avoid this. Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 4d11041a8c6f..04845e43df15 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -27,9 +27,11 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) { - uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv; + uint64_t n = 0; uint8_t count = 0; + memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t)); + while (n) { count++; n &= (n - 1); -- cgit From 5111195ad61791bb22d8650ad7431344d9a00036 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Tue, 12 Nov 2019 09:13:20 -0500 Subject: drm/amd/display: Fix coding error in connector atomic check [why] For MST connector atomic check we have to check a new CRTC state instead of an old one, when checking if CRTC is disabled to release VCPI slots allocated. 
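For orientation (the one-line fix follows below): in an atomic check callback
the "new" state describes the configuration being validated, while the "old"
state is whatever is currently committed, so testing the old CRTC state here
decides whether to release VCPI slots based on stale data. The corrected
lookup is simply:

	/* fetch the proposed (new) state of the CRTC this connector is bound to */
	new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);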
Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 205531ca686f..81367c869134 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -273,7 +273,7 @@ static int dm_dp_mst_atomic_check(struct drm_connector *connector, return 0; if (new_conn_state->crtc) { - new_crtc_state = drm_atomic_get_old_crtc_state(state, new_conn_state->crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); if (!new_crtc_state || !drm_atomic_crtc_needs_modeset(new_crtc_state) || new_crtc_state->enable) -- cgit From b992691d453c1cb16390d21c9f557196d3ab3141 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Mon, 18 Nov 2019 10:53:54 +1100 Subject: drm/amdgpu: fix bad DMA from INTERRUPT_CNTL2 The INTERRUPT_CNTL2 register expects a valid DMA address, but is currently set with a GPU MC address. This can cause problems on systems that detect the resulting DMA read from an invalid address (found on a Power8 guest). Instead, use the DMA address of the dummy page because it will always be safe. Fixes: 27ae10641e9c ("drm/amdgpu: add interupt handler implementation for si v3") Signed-off-by: Sam Bobroff Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si_ih.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 57bb5f9e08b2..88ae27a5a03d 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev) u32 interrupt_cntl, ih_cntl, ih_rb_cntl; si_ih_disable_interrupts(adev); - WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; -- cgit From 16641949257a5644a0f9f2d2f454663c9a37841e Mon Sep 17 00:00:00 2001 From: zhengbin Date: Mon, 18 Nov 2019 17:00:31 +0800 Subject: drm/amdgpu: remove not needed memset Fixes coccicheck warning: drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c:64:13-31: WARNING: dma_alloc_coherent use in ih -> ring already zeroes out memory, so memset is not needed Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 6d8f05511aba..111a301ce878 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -66,7 +66,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, if (ih->ring == NULL) return -ENOMEM; - memset((void *)ih->ring, 0, ih->ring_size + 8); ih->gpu_addr = dma_addr; ih->wptr_addr = dma_addr + ih->ring_size; ih->wptr_cpu = &ih->ring[ih->ring_size / 4]; -- cgit From 4a11327822006f56b673f4fb06f1718bcb9a700a Mon Sep 17 00:00:00 2001 From: Chen Wandun Date: Mon, 18 Nov 2019 16:03:34 +0800 Subject: drm/amd/powerplay: return errno 
code to caller when error occur return errno code to caller when error occur, and meanwhile remove gcc '-Wunused-but-set-variable' warning. drivers/gpu/drm/amd/amdgpu/../powerplay/smumgr/vegam_smumgr.c: In function vegam_populate_smc_boot_level: drivers/gpu/drm/amd/amdgpu/../powerplay/smumgr/vegam_smumgr.c:1364:6: warning: variable result set but not used [-Wunused-but-set-variable] Reviewed-by: Evan Quan Signed-off-by: Chen Wandun Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 2068eb00d2f8..50896e9b2579 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -1371,11 +1371,16 @@ static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr, result = phm_find_boot_level(&(data->dpm_table.sclk_table), data->vbios_boot_state.sclk_bootup_value, (uint32_t *)&(table->GraphicsBootLevel)); + if (result) + return result; result = phm_find_boot_level(&(data->dpm_table.mclk_table), data->vbios_boot_state.mclk_bootup_value, (uint32_t *)&(table->MemoryBootLevel)); + if (result) + return result; + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; table->BootVddci = data->vbios_boot_state.vddci_bootup_value * -- cgit From 85f8433f9bd72ea4e10523058f1cdcda59c4629c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 13 Nov 2019 11:08:35 -0500 Subject: drm/amdgpu/powerplay: properly set PP_GFXOFF_MASK (v2) So that the setting reflects what the hw supports. This will be used in a subsequent patch so needs to be correct. v2: squash in fix from Colin Ian King Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205497 Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 ++ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 9 +++++++++ 2 files changed, 11 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 223c5a794561..b73561957d24 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -730,6 +730,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA20: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; vega20_set_ppt_funcs(smu); break; case CHIP_NAVI10: @@ -738,6 +739,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) navi10_set_ppt_funcs(smu); break; case CHIP_ARCTURUS: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; arcturus_set_ppt_funcs(smu); /* OD is not supported on Arcturus */ smu->od_enabled =false; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index a24beaa4fb01..d2909c91d65b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) int hwmgr_early_init(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev; + if (!hwmgr) return -EINVAL; @@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) hwmgr_init_workload_prority(hwmgr); hwmgr->gfxoff_state_changed_by_workload = false; + adev = hwmgr->adev; + switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CI: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &ci_smu_funcs; 
ci_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | @@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) smu7_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_CZ: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->od_enabled = false; hwmgr->smumgr_funcs = &smu8_smu_funcs; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; smu8_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_VI: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; switch (hwmgr->chip_id) { case CHIP_TOPAZ: @@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_AI: switch (hwmgr->chip_id) { case CHIP_VEGA10: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); @@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) vega12_hwmgr_init(hwmgr); break; case CHIP_VEGA20: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega20_smu_funcs; vega20_hwmgr_init(hwmgr); -- cgit From ca9317b9183b939e40903deb95dfe862b4653ee9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Nov 2019 11:39:05 -0500 Subject: drm/amdgpu: disable gfxoff when using register read interface When gfxoff is enabled, accessing gfx registers via MMIO can lead to a hang. Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205497 Acked-by: Xiaojie Yuan Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 39e37a53cb90..31d039a3f8c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -664,15 +664,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return -ENOMEM; alloc_size = info->read_mmr_reg.count * sizeof(*regs); - for (i = 0; i < info->read_mmr_reg.count; i++) + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < info->read_mmr_reg.count; i++) { if (amdgpu_asic_read_register(adev, se_num, sh_num, info->read_mmr_reg.dword_offset + i, ®s[i])) { DRM_DEBUG_KMS("unallowed offset %#x\n", info->read_mmr_reg.dword_offset + i); kfree(regs); + amdgpu_gfx_off_ctrl(adev, true); return -EFAULT; } + } + amdgpu_gfx_off_ctrl(adev, true); n = copy_to_user(out, regs, min(size, alloc_size)); kfree(regs); return n ? -EFAULT : 0; -- cgit From b62d955426e0a235eb7db306b4d49e242aa5eaa4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Nov 2019 09:38:28 -0500 Subject: drm/amdgpu: remove experimental flag for Navi14 5.4 and newer works fine with navi14. 
Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 557be89421a8..0ffc9447b573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -998,10 +998,10 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, /* Navi14 */ - {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, + {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, + {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, + {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, -- cgit From 3f2a06ac814e84fba16592286ec99364098cdd07 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Nov 2019 10:21:23 -0500 Subject: drm/amdgpu: disable gfxoff on original raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are still combinations of sbios and firmware that are not stable. Bug: https://bugzilla.kernel.org/show_bug.cgi?id=204689 Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3ebd5c20dfd3..3e4ac2f06c3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1051,8 +1051,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) case CHIP_VEGA20: break; case CHIP_RAVEN: - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - &&((adev->gfx.rlc_fw_version != 106 && + /* Disable GFXOFF on original raven. There are combinations + * of sbios and platforms that are not stable. + */ + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) + &&((adev->gfx.rlc_fw_version != 106 && adev->gfx.rlc_fw_version < 531) || (adev->gfx.rlc_fw_version == 53815) || (adev->gfx.rlc_feature_version < 1) || -- cgit From 2aa87ba56892c840b77a0fc500c4814e6d52488e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Nov 2019 10:26:52 -0500 Subject: Revert "drm/amd/display: enable S/G for RAVEN chip" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 1c4259159132ae4ceaf7c6db37a6cf76417f73d9. S/G display is not stable with the IOMMU enabled on some platforms. 
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205523 Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 6a27027a6f20..4e699071d144 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -514,7 +514,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, * Also, don't allow GTT domain if the BO doens't have USWC falg set. */ if (adev->asic_type >= CHIP_CARRIZO && - adev->asic_type <= CHIP_RAVEN && + adev->asic_type < CHIP_RAVEN && (adev->flags & AMD_IS_APU) && (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && amdgpu_bo_support_uswc(bo_flags) && diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 85318463b8a6..4d4bf4fd7b6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -930,7 +930,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) */ if (adev->flags & AMD_IS_APU && adev->asic_type >= CHIP_CARRIZO && - adev->asic_type <= CHIP_RAVEN) + adev->asic_type < CHIP_RAVEN) init_data.flags.gpu_vm_support = true; if (amdgpu_dc_feature_mask & DC_FBC_MASK) -- cgit From b4f8285a1960f133cc679445049bb75e5c65a33f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 18 Nov 2019 17:04:24 +0800 Subject: drm/amd/powerplay: correct swSMU baco reset related settings Added bif doorbell interrupt setting and applied different settings for BACO reset for RAS recovery. 
Signed-off-by: Evan Quan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index fc9679ea2368..b3e84f65f8fe 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1668,6 +1668,10 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + uint32_t bif_doorbell_intr_cntl; + uint32_t data; int ret = 0; if (smu_v11_0_baco_get_state(smu) == state) @@ -1675,10 +1679,30 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) mutex_lock(&smu_baco->mutex); - if (state == SMU_BACO_STATE_ENTER) - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO); - else + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + + if (state == SMU_BACO_STATE_ENTER) { + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + if (!ras || !ras->supported) { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0); + } else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1); + } + } else { ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco); + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, 0); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + } if (ret) goto out; -- cgit From 0a650c1d35e2ff6487976bca3e009f63479716b2 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 4 Nov 2019 17:31:29 +0800 Subject: drm/amd/powerplay: add Arcturus baco reset support Enable baco reset support on Arcturus. 
Signed-off-by: Evan Quan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 7 +++++++ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 14 ++++++++++---- 3 files changed, 18 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 44add7509fb0..1d19812a5c38 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -559,6 +559,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev) return AMD_RESET_METHOD_MODE2; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_ARCTURUS: soc15_asic_get_baco_capability(adev, &baco_reset); break; case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 58c7c4a3053e..1af0b1ab6f70 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -495,6 +495,7 @@ static int arcturus_store_powerplay_table(struct smu_context *smu) { struct smu_11_0_powerplay_table *powerplay_table = NULL; struct smu_table_context *table_context = &smu->smu_table; + struct smu_baco_context *smu_baco = &smu->smu_baco; int ret = 0; if (!table_context->power_play_table) @@ -507,6 +508,12 @@ static int arcturus_store_powerplay_table(struct smu_context *smu) table_context->thermal_controller_type = powerplay_table->thermal_controller_type; + mutex_lock(&smu_baco->mutex); + if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || + powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) + smu_baco->platform_support = true; + mutex_unlock(&smu_baco->mutex); + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index b3e84f65f8fe..8b1c4164a1f6 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1642,7 +1642,9 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) if (!baco_support) return false; - if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) + /* Arcturus does not support this bit mask */ + if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && + !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) return false; val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); @@ -1714,11 +1716,15 @@ out: int smu_v11_0_baco_reset(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; - ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); - if (ret) - return ret; + /* Arcturus does not need this audio workaround */ + if (adev->asic_type != CHIP_ARCTURUS) { + ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); + if (ret) + return ret; + } ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); if (ret) -- cgit From 32cc3bf0a77c0e600a3df33f1661da4dee7ceadb Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 19 Nov 2019 11:43:45 +0800 Subject: drm/amd/powerplay: add missing header file declaration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can fix the compile errors below: drivers/gpu/drm/amd/amdgpu/../powerplay/smu_v11_0.c: In function ‘smu_v11_0_baco_set_state’: drivers/gpu/drm/amd/amdgpu/../powerplay/smu_v11_0.c:1674:27: error: implicit declaration of function ‘amdgpu_ras_get_context’ [-Werror=implicit-function-declaration] struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); ^ 
drivers/gpu/drm/amd/amdgpu/../powerplay/smu_v11_0.c:1674:27: warning: initialization makes pointer from integer without a cast [-Wint-conversion] drivers/gpu/drm/amd/amdgpu/../powerplay/smu_v11_0.c:1692:19: error: dereferencing pointer to incomplete type ‘struct amdgpu_ras’ if (!ras || !ras->supported) { Signed-off-by: Evan Quan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 8b1c4164a1f6..f677743cb511 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -37,6 +37,7 @@ #include "soc15_common.h" #include "atom.h" #include "amd_pcie.h" +#include "amdgpu_ras.h" #include "asic_reg/thm/thm_11_0_2_offset.h" #include "asic_reg/thm/thm_11_0_2_sh_mask.h" -- cgit From 13a390a6f98abed327ec794a70830f066f3b6507 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 18 Nov 2019 17:13:56 +0800 Subject: drm/amdgpu: add psp funcs for ring write pointer read/write The ring write pointer regsiter update is the only part that is IP specific ones in psp_cmd_submit function. Add two callbacks for wptr read/write so that we unify the psp_cmd_submit function for all the ASICs. Signed-off-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 5 +++++ drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 26 ++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 26 ++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 27 +++++++++++++++++++++++++++ 5 files changed, 100 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 09c5474ebcc3..d5620c46f3fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -116,6 +116,8 @@ struct psp_funcs int (*mem_training_init)(struct psp_context *psp); void (*mem_training_fini)(struct psp_context *psp); int (*mem_training)(struct psp_context *psp, uint32_t ops); + uint32_t (*ring_get_wptr)(struct psp_context *psp); + void (*ring_set_wptr)(struct psp_context *psp, uint32_t value); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -346,6 +348,9 @@ struct amdgpu_psp_funcs { ((psp)->funcs->ras_cure_posion ? 
\ (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL) +#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp)) +#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value)) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index b345e69ba246..4b8fdddc4c46 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -407,6 +407,20 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp) return -EINVAL; } +static uint32_t psp_v10_0_ring_get_wptr(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + return RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); +} + +static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v10_0_funcs = { .init_microcode = psp_v10_0_init_microcode, .ring_init = psp_v10_0_ring_init, @@ -416,6 +430,8 @@ static const struct psp_funcs psp_v10_0_funcs = { .cmd_submit = psp_v10_0_cmd_submit, .compare_sram_data = psp_v10_0_compare_sram_data, .mode1_reset = psp_v10_0_mode1_reset, + .ring_get_wptr = psp_v10_0_ring_get_wptr, + .ring_set_wptr = psp_v10_0_ring_set_wptr, }; void psp_v10_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index ffeaa2f5588d..5cd2733aa2ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -1068,6 +1068,30 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return 0; } +static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (psp_v11_0_support_vmr_ring(psp)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (psp_v11_0_support_vmr_ring(psp)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, @@ -1091,6 +1115,8 @@ static const struct psp_funcs psp_v11_0_funcs = { .mem_training_init = psp_v11_0_memory_training_init, .mem_training_fini = psp_v11_0_memory_training_fini, .mem_training = psp_v11_0_memory_training, + .ring_get_wptr = psp_v11_0_ring_get_wptr, + .ring_set_wptr = psp_v11_0_ring_set_wptr, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 8f553f6f92d6..75b3f9d15a18 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -547,6 +547,30 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) return 0; } +static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (psp_v12_0_support_vmr_ring(psp)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, 
mmMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (psp_v12_0_support_vmr_ring(psp)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v12_0_funcs = { .init_microcode = psp_v12_0_init_microcode, .bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv, @@ -558,6 +582,8 @@ static const struct psp_funcs psp_v12_0_funcs = { .cmd_submit = psp_v12_0_cmd_submit, .compare_sram_data = psp_v12_0_compare_sram_data, .mode1_reset = psp_v12_0_mode1_reset, + .ring_get_wptr = psp_v12_0_ring_get_wptr, + .ring_set_wptr = psp_v12_0_ring_set_wptr, }; void psp_v12_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index fdc00938327b..bee6514f04a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -642,6 +642,31 @@ static bool psp_v3_1_support_vmr_ring(struct psp_context *psp) return false; } +static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (psp_v3_1_support_vmr_ring(psp)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + return data; +} + +static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (psp_v3_1_support_vmr_ring(psp)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + /* send interrupt to PSP for SRIOV ring write pointer update */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v3_1_funcs = { .init_microcode = psp_v3_1_init_microcode, .bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv, @@ -655,6 +680,8 @@ static const struct psp_funcs psp_v3_1_funcs = { .smu_reload_quirk = psp_v3_1_smu_reload_quirk, .mode1_reset = psp_v3_1_mode1_reset, .support_vmr_ring = psp_v3_1_support_vmr_ring, + .ring_get_wptr = psp_v3_1_ring_get_wptr, + .ring_set_wptr = psp_v3_1_ring_set_wptr, }; void psp_v3_1_set_psp_funcs(struct psp_context *psp) -- cgit From cc65176e511c4b988c91b4ab806fce66209b1a42 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 18 Nov 2019 17:03:12 +0800 Subject: drm/amdgpu: add helper func for psp ring cmd submission Except for ring wptr update, the psp ring cmd submission function shouldn't be IP specific one. Create a common helper function to be shared for all the ASICs. 
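The practical effect (sketch only; psp_vNN_0 is a placeholder for any
ASIC-specific file, not a real symbol): a new ASIC now only has to wire up the
two write-pointer callbacks, and the shared psp_ring_cmd_submit() introduced
below handles the ring-frame bookkeeping:

	static const struct psp_funcs psp_vNN_0_funcs = {
		/* ... other per-ASIC callbacks ... */
		.ring_get_wptr = psp_vNN_0_ring_get_wptr,
		.ring_set_wptr = psp_vNN_0_ring_set_wptr,
	};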
Signed-off-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 50 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 4 +++ 2 files changed, 54 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index b1662af0f0cf..88fb04da6026 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1722,6 +1722,56 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, return psp_execute_np_fw_load(&adev->psp, &ucode); } +int psp_ring_cmd_submit(struct psp_context *psp, + uint64_t cmd_buf_mc_addr, + uint64_t fence_mc_addr, + int index) +{ + unsigned int psp_write_ptr_reg = 0; + struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; + struct psp_ring *ring = &psp->km_ring; + struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; + struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + + ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; + struct amdgpu_device *adev = psp->adev; + uint32_t ring_size_dw = ring->ring_size / 4; + uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; + + /* KM (GPCOM) prepare write pointer */ + psp_write_ptr_reg = psp_ring_get_wptr(psp); + + /* Update KM RB frame pointer to new frame */ + /* write_frame ptr increments by size of rb_frame in bytes */ + /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ + if ((psp_write_ptr_reg % ring_size_dw) == 0) + write_frame = ring_buffer_start; + else + write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); + /* Check invalid write_frame ptr address */ + if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { + DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", + ring_buffer_start, ring_buffer_end, write_frame); + DRM_ERROR("write_frame is pointing to address out of bounds\n"); + return -EINVAL; + } + + /* Initialize KM RB frame */ + memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); + + /* Update KM RB frame */ + write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); + write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); + write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); + write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); + write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); + + /* Update the write Pointer in DWORDs */ + psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; + psp_ring_set_wptr(psp, psp_write_ptr_reg); + return 0; +} + static bool psp_check_fw_loading_status(struct amdgpu_device *adev, enum AMDGPU_UCODE_ID ucode_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index d5620c46f3fc..482e7675b7da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -377,4 +377,8 @@ int psp_rlc_autoload_start(struct psp_context *psp); extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, uint32_t value); +int psp_ring_cmd_submit(struct psp_context *psp, + uint64_t cmd_buf_mc_addr, + uint64_t fence_mc_addr, + int index); #endif -- cgit From 5bdd0b72d6088428da4745b43684f6dfa4ab6062 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 18 Nov 2019 17:39:55 +0800 Subject: drm/amdgpu: switch to common helper 
func for psp cmd submission Drop all the IP specific cmd_submit callback function and use the common helper instead Signed-off-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 5 --- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 49 --------------------------- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 58 ------------------------------- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 58 ------------------------------- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 60 --------------------------------- 6 files changed, 1 insertion(+), 231 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 88fb04da6026..2a8a08aa6eaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -158,7 +158,7 @@ psp_cmd_submit_buf(struct psp_context *psp, memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); index = atomic_inc_return(&psp->fence_value); - ret = psp_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); + ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); if (ret) { atomic_dec(&psp->fence_value); mutex_unlock(&psp->mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 482e7675b7da..40594f27dab1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -94,9 +94,6 @@ struct psp_funcs enum psp_ring_type ring_type); int (*ring_destroy)(struct psp_context *psp, enum psp_ring_type ring_type); - int (*cmd_submit)(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index); bool (*compare_sram_data)(struct psp_context *psp, struct amdgpu_firmware_info *ucode, enum AMDGPU_UCODE_ID ucode_type); @@ -302,8 +299,6 @@ struct amdgpu_psp_funcs { #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type)) #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type)) #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type))) -#define psp_cmd_submit(psp, cmd_mc, fence_mc, index) \ - (psp)->funcs->cmd_submit((psp), (cmd_mc), (fence_mc), (index)) #define psp_compare_sram_data(psp, ucode, type) \ (psp)->funcs->compare_sram_data((psp), (ucode), (type)) #define psp_init_microcode(psp) \ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 4b8fdddc4c46..7539104175e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -230,54 +230,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v10_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ 
- if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v10_0_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -427,7 +379,6 @@ static const struct psp_funcs psp_v10_0_funcs = { .ring_create = psp_v10_0_ring_create, .ring_stop = psp_v10_0_ring_stop, .ring_destroy = psp_v10_0_ring_destroy, - .cmd_submit = psp_v10_0_cmd_submit, .compare_sram_data = psp_v10_0_compare_sram_data, .mode1_reset = psp_v10_0_mode1_reset, .ring_get_wptr = psp_v10_0_ring_get_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 5cd2733aa2ea..2eaa197a9404 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -519,63 +519,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v11_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v11_0_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct 
psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v11_0_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v11_0_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -1101,7 +1044,6 @@ static const struct psp_funcs psp_v11_0_funcs = { .ring_create = psp_v11_0_ring_create, .ring_stop = psp_v11_0_ring_stop, .ring_destroy = psp_v11_0_ring_destroy, - .cmd_submit = psp_v11_0_cmd_submit, .compare_sram_data = psp_v11_0_compare_sram_data, .mode1_reset = psp_v11_0_mode1_reset, .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 75b3f9d15a18..58d8b6d732e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -334,63 +334,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v12_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v12_0_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* 
Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v12_0_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v12_0_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -579,7 +522,6 @@ static const struct psp_funcs psp_v12_0_funcs = { .ring_create = psp_v12_0_ring_create, .ring_stop = psp_v12_0_ring_stop, .ring_destroy = psp_v12_0_ring_destroy, - .cmd_submit = psp_v12_0_cmd_submit, .compare_sram_data = psp_v12_0_compare_sram_data, .mode1_reset = psp_v12_0_mode1_reset, .ring_get_wptr = psp_v12_0_ring_get_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index bee6514f04a9..b4d6427aaa79 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -410,65 +410,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v3_1_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v3_1_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v3_1_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - /* send interrupt to PSP for SRIOV ring write pointer update */ - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, 
mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v3_1_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -675,7 +616,6 @@ static const struct psp_funcs psp_v3_1_funcs = { .ring_create = psp_v3_1_ring_create, .ring_stop = psp_v3_1_ring_stop, .ring_destroy = psp_v3_1_ring_destroy, - .cmd_submit = psp_v3_1_cmd_submit, .compare_sram_data = psp_v3_1_compare_sram_data, .smu_reload_quirk = psp_v3_1_smu_reload_quirk, .mode1_reset = psp_v3_1_mode1_reset, -- cgit From 858a2bbad6b0666cdd50e356383b8918e010002c Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 18 Nov 2019 18:17:12 +0800 Subject: drm/amdgpu: pull ras controller int status only when ras enabled ras_controller_irq and athub_err_event_irq are only registered when PCIE_BIF ras is marked as supported. as the result, the driver also just need pull the int status in such case. Signed-off-by: Hawking Zhang Reviewed-by: John Clements Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 30d540d23b77..5ed4227f304b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -55,6 +55,7 @@ #include "amdgpu_connectors.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_ras.h" #include @@ -162,13 +163,15 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg) * register to check whether the interrupt is triggered or not, and properly * ack the interrupt if it is there */ - if (adev->nbio.funcs && - adev->nbio.funcs->handle_ras_controller_intr_no_bifring) - adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev); - - if (adev->nbio.funcs && - adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring) - adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) { + if (adev->nbio.funcs && + adev->nbio.funcs->handle_ras_controller_intr_no_bifring) + adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev); + + if (adev->nbio.funcs && + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring) + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + } return ret; } -- cgit From 30b2c0cae4303a9691174968482c43bfedcb7d76 Mon Sep 17 00:00:00 2001 From: changzhu Date: Wed, 13 Nov 2019 17:17:09 +0800 Subject: drm/amd/powerplay: enable gpu_busy_percent sys interface for renoir (v2) To get the value of gpu_busy_percent, it needs to realize get_current_activity_percent and get_metrics_table. The framework of renoir smu is different from old ones like raven. It needs to realize get_current_activity_percent and get_metrics_table in renoir_ppt.c like navi10. 
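The heart of the change is a small time-based cache in front of the SMU metrics table, so repeated gpu_busy_percent reads do not query the firmware more than once per roughly 100ms. A minimal sketch of that pattern, with simplified names (the real code is in renoir_ppt.c below), assuming the 100ms window the driver uses:

    /* Rough sketch only: cache the SMU metrics table and refresh it at most
     * every 100ms; callers always read the cached copy. */
    struct metrics_cache {
            SmuMetrics_t table;          /* last copy fetched from the SMU */
            unsigned long last_fetch;    /* jiffies timestamp of that fetch */
            bool valid;
    };

    static int get_cached_metrics(struct smu_context *smu,
                                  struct metrics_cache *cache,
                                  SmuMetrics_t *out)
    {
            if (!cache->valid ||
                time_after(jiffies, cache->last_fetch + msecs_to_jiffies(100))) {
                    int ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                               &cache->table, false);
                    if (ret)
                            return ret;          /* keep the old copy on failure */
                    cache->last_fetch = jiffies;
                    cache->valid = true;
            }
            *out = cache->table;                 /* serve the cached snapshot */
            return 0;
    }

AverageGfxActivity is then read out of the cached table to answer the AMDGPU_PP_SENSOR_GPU_LOAD query.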
v2: remove unused variable (Alex) Signed-off-by: changzhu Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 4 ++ drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 76 +++++++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 33 ++++++++++++ 3 files changed, 113 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index 1745e0146fba..44c65dd8850d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -62,6 +62,10 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate); int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable); +int smu_v12_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); + uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu); int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 784903a313b7..b44ce143e895 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -139,6 +139,27 @@ static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index) return mapping.map_to; } +static int renoir_get_metrics_table(struct smu_context *smu, + SmuMetrics_t *metrics_table) +{ + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + + return ret; +} + static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables) { struct smu_table_context *smu_table = &smu->smu_table; @@ -154,6 +175,11 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables) if (!smu_table->clocks_table) return -ENOMEM; + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + return 0; } @@ -386,6 +412,32 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { return ret; } +static int renoir_get_current_activity_percent(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (!value) + return -EINVAL; + + ret = renoir_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + *value = metrics.AverageGfxActivity; + break; + default: + pr_err("Invalid sensor for retrieving clock activity\n"); + return -EINVAL; + } + + return 0; +} + static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile) { @@ -699,6 +751,29 @@ static int renoir_get_power_profile_mode(struct smu_context *smu, return size; } +static int renoir_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + if (!data || !size) + return -EINVAL; + + mutex_lock(&smu->sensor_lock); + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = renoir_get_current_activity_percent(smu, sensor, (uint32_t *)data); + *size 
= 4; + break; + default: + ret = smu_v12_0_read_sensor(smu, sensor, data, size); + } + mutex_unlock(&smu->sensor_lock); + + return ret; +} + static const struct pptable_funcs renoir_ppt_funcs = { .get_smu_msg_index = renoir_get_smu_msg_index, .get_smu_table_index = renoir_get_smu_table_index, @@ -719,6 +794,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .get_dpm_clock_table = renoir_get_dpm_clock_table, .set_watermarks_table = renoir_set_watermarks_table, .get_power_profile_mode = renoir_get_power_profile_mode, + .read_sensor = renoir_read_sensor, .check_fw_status = smu_v12_0_check_fw_status, .check_fw_version = smu_v12_0_check_fw_version, .powergate_sdma = smu_v12_0_powergate_sdma, diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 18b24f954380..045167311ae8 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -223,6 +223,39 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) SMU_MSG_SetGfxCGPG, enable ? 1 : 0); } +int smu_v12_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + if(!data || !size) + return -EINVAL; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_MIN_FAN_RPM: + *(uint32_t *)data = 0; + *size = 4; + break; + default: + ret = smu_common_read_sensor(smu, sensor, data, size); + break; + } + + if (ret) + *size = 0; + + return ret; +} + /** * smu_v12_0_get_gfxoff_status - get gfxoff status * -- cgit From 69d5436d4dedab8c5dda0334b65b7edb84050860 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 10:01:35 -0500 Subject: drm/amdgpu: add asic callback for BACO support BACO - Bus Active, Chip Off Used to check whether the device supports BACO. This will be used to enable runtime pm on devices which support BACO. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 89499501ecec..6648e7788669 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -589,6 +589,8 @@ struct amdgpu_asic_funcs { bool (*need_reset_on_init)(struct amdgpu_device *adev); /* PCIe replay counter */ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); + /* device supports BACO */ + bool (*supports_baco)(struct amdgpu_device *adev); }; /* @@ -1120,6 +1122,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev))) +#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev)) + #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); /* Common functions */ -- cgit From 988eb9ff3e911763a115cb8e778ab311357c7fd5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 15 Oct 2019 16:23:31 -0400 Subject: drm/amdgpu: add supports_baco callback for soc15 asics. 
(v2) BACO - Bus Active, Chip Off Check the BACO capabilities from the powerplay table. v2: drop unrelated struct cleanup Reviewed-by: Evan Quan (v1) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 1d19812a5c38..36aa400de774 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -602,6 +602,28 @@ static int soc15_asic_reset(struct amdgpu_device *adev) } } +static bool soc15_supports_baco(struct amdgpu_device *adev) +{ + bool baco_support; + + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_VEGA12: + soc15_asic_get_baco_capability(adev, &baco_support); + break; + case CHIP_VEGA20: + if (adev->psp.sos_fw_version >= 0x80067) + soc15_asic_get_baco_capability(adev, &baco_support); + else + baco_support = false; + break; + default: + return false; + } + + return baco_support; +} + /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, u32 cntl_reg, u32 status_reg) { @@ -1004,6 +1026,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .get_pcie_usage = &soc15_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, + .supports_baco = &soc15_supports_baco, }; static const struct amdgpu_asic_funcs vega20_asic_funcs = @@ -1025,6 +1048,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .get_pcie_usage = &vega20_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, + .supports_baco = &soc15_supports_baco, }; static int soc15_common_early_init(void *handle) -- cgit From 3670c242e3409ae4bfd9699ebd301ef7d792cb8a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 10:15:36 -0500 Subject: drm/amdgpu: add supports_baco callback for SI asics. BACO - Bus Active, Chip Off Not supported. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 29024e64c886..cb682d44737a 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1197,6 +1197,11 @@ static int si_asic_reset(struct amdgpu_device *adev) return 0; } +static bool si_asic_supports_baco(struct amdgpu_device *adev) +{ + return false; +} + static enum amd_reset_method si_asic_reset_method(struct amdgpu_device *adev) { @@ -1425,6 +1430,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs = .get_pcie_usage = &si_get_pcie_usage, .need_reset_on_init = &si_need_reset_on_init, .get_pcie_replay_count = &si_get_pcie_replay_count, + .supports_baco = &si_asic_supports_baco, }; static uint32_t si_get_rev_id(struct amdgpu_device *adev) -- cgit From 0d0c07ee0794ed4a0b14a334d39a692df3e984e5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 10:16:15 -0500 Subject: drm/amdgpu: add supports_baco callback for CIK asics. BACO - Bus Active, Chip Off Check the BACO capabilities from the powerplay table. 
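All of the per-ASIC callbacks added in this group of patches are reached through the amdgpu_asic_supports_baco() wrapper introduced earlier in the series; callers never touch the powerplay query directly. A rough sketch of an assumed call site (enable_baco_features() is a hypothetical placeholder, not a real function):

    /* Hypothetical consumer: gate BACO-based features on the new callback. */
    if (amdgpu_asic_supports_baco(adev))
            enable_baco_features(adev);   /* e.g. BACO reset or runtime pm */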
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 2d64d270725d..968bc706b94d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1310,6 +1310,23 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev) return r; } +static bool cik_asic_supports_baco(struct amdgpu_device *adev) +{ + bool baco_support; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + smu7_asic_get_baco_capability(adev, &baco_support); + break; + default: + baco_support = false; + break; + } + + return baco_support; +} + static enum amd_reset_method cik_asic_reset_method(struct amdgpu_device *adev) { @@ -1899,6 +1916,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .get_pcie_usage = &cik_get_pcie_usage, .need_reset_on_init = &cik_need_reset_on_init, .get_pcie_replay_count = &cik_get_pcie_replay_count, + .supports_baco = &cik_asic_supports_baco, }; static int cik_common_early_init(void *handle) -- cgit From e45ed9435fda13d418872260d510eb01f23aa5d0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 10:17:05 -0500 Subject: drm/amdgpu: add supports_baco callback for VI asics. BACO - Bus Active, Chip Off Check the BACO capabilities from the powerplay table. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 78e5cdc0c058..871c0b8c6b0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -745,6 +745,27 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev) return r; } +static bool vi_asic_supports_baco(struct amdgpu_device *adev) +{ + bool baco_support; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_TOPAZ: + smu7_asic_get_baco_capability(adev, &baco_support); + break; + default: + baco_support = false; + break; + } + + return baco_support; +} + static enum amd_reset_method vi_asic_reset_method(struct amdgpu_device *adev) { @@ -1116,6 +1137,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .get_pcie_usage = &vi_get_pcie_usage, .need_reset_on_init = &vi_need_reset_on_init, .get_pcie_replay_count = &vi_get_pcie_replay_count, + .supports_baco = &vi_asic_supports_baco, }; #define CZ_REV_BRISTOL(rev) \ -- cgit From ac7426169e7bcbbf270fec48301286e5ccae08bc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 7 Nov 2019 18:12:17 -0500 Subject: drm/amdgpu: add supports_baco callback for NV asics. BACO - Bus Active, Chip Off Check the BACO capabilities from the powerplay table. 
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 343f47995657..8e7c6a4b0018 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -315,6 +315,16 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) return ret; } +static bool nv_asic_supports_baco(struct amdgpu_device *adev) +{ + struct smu_context *smu = &adev->smu; + + if (smu_baco_is_support(smu)) + return true; + else + return false; +} + static enum amd_reset_method nv_asic_reset_method(struct amdgpu_device *adev) { @@ -620,6 +630,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .get_pcie_usage = &nv_get_pcie_usage, .need_reset_on_init = &nv_need_reset_on_init, .get_pcie_replay_count = &nv_get_pcie_replay_count, + .supports_baco = &nv_asic_supports_baco, }; static int nv_common_early_init(void *handle) -- cgit From a69cba42b11ae5e8cede2ee6a61d9faf5187df9b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 28 Oct 2019 14:47:38 -0400 Subject: drm/amdgpu: add a amdgpu_device_supports_baco helper BACO - Bus Active, Chip Off To check if a device supports BACO or not. This will be used in determining when to enable runtime pm. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6648e7788669..bdcfaf11f5e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1141,6 +1141,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 array_size); bool amdgpu_device_is_px(struct drm_device *dev); +bool amdgpu_device_supports_baco(struct drm_device *dev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 27ed48bde3fa..314138a95ccd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -153,6 +153,21 @@ bool amdgpu_device_is_px(struct drm_device *dev) return false; } +/** + * amdgpu_device_supports_baco - Does the device support BACO + * + * @dev: drm_device pointer + * + * Returns true if the device supporte BACO, + * otherwise return false. + */ +bool amdgpu_device_supports_baco(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + + return amdgpu_asic_supports_baco(adev); +} + /** * VRAM access helper functions. * -- cgit From 31af062acfbd5db8b0b99d0ad418b33d4458e206 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 10:42:22 -0500 Subject: drm/amdgpu: rename amdgpu_device_is_px to amdgpu_device_supports_boco (v2) BACO - Bus Active, Chip Off BOCO - Bus Off, Chip Off To better match what we are checking for and to align with amdgpu_device_supports_baco. BOCO is used on PowerXpress/Hybrid Graphics systems and BACO is used on desktop dGPU boards. 
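In shorthand, the distinction the new names draw, and how a caller is expected to pick between the two helpers (an illustrative fragment, not an actual call site in this patch):

    /* BOCO (Bus Off, Chip Off): ACPI/ATPX cuts power to the whole dGPU,
     *   used on PowerXpress / Hybrid Graphics laptops.
     * BACO (Bus Active, Chip Off): the chip powers down while the PCIe
     *   link stays up, used on desktop dGPU boards. */
    bool boco = amdgpu_device_supports_boco(dev);  /* was amdgpu_device_is_px() */
    bool baco = !boco && amdgpu_device_supports_baco(dev);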
v2: fix typo in documentation Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bdcfaf11f5e7..56ef19751c05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1140,7 +1140,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, const u32 array_size); -bool amdgpu_device_is_px(struct drm_device *dev); +bool amdgpu_device_supports_boco(struct drm_device *dev); bool amdgpu_device_supports_baco(struct drm_device *dev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 314138a95ccd..635091bad874 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -137,14 +137,14 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO, static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); /** - * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control + * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control * * @dev: drm_device pointer * * Returns true if the device is a dGPU with HG/PX power control, * otherwise return false. */ -bool amdgpu_device_is_px(struct drm_device *dev) +bool amdgpu_device_supports_boco(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; @@ -1088,7 +1088,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero { struct drm_device *dev = pci_get_drvdata(pdev); - if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) + if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { @@ -2913,7 +2913,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, * ignore it */ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); - if (amdgpu_device_is_px(ddev)) + if (amdgpu_device_supports_boco(ddev)) runtime = true; if (!pci_is_thunderbolt_attached(adev->pdev)) vga_switcheroo_register_client(adev->pdev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0ffc9447b573..709861b48510 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1155,7 +1155,7 @@ static int amdgpu_pmops_resume(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); /* GPU comes up enabled by the bios on resume */ - if (amdgpu_device_is_px(drm_dev)) { + if (amdgpu_device_supports_boco(drm_dev)) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -1203,7 +1203,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (!amdgpu_device_is_px(drm_dev)) { + if (!amdgpu_device_supports_boco(drm_dev)) { pm_runtime_forbid(dev); return -EBUSY; } @@ -1230,7 +1230,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (!amdgpu_device_is_px(drm_dev)) + if (!amdgpu_device_supports_boco(drm_dev)) return -EINVAL; 
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -1255,7 +1255,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct drm_crtc *crtc; - if (!amdgpu_device_is_px(drm_dev)) { + if (!amdgpu_device_supports_boco(drm_dev)) { pm_runtime_forbid(dev); return -EBUSY; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 31d039a3f8c0..a1628e95573f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (amdgpu_sriov_vf(adev)) amdgpu_virt_request_full_gpu(adev, false); - if (amdgpu_device_is_px(dev)) { + if (amdgpu_device_supports_boco(dev)) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); } @@ -180,7 +180,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - if (amdgpu_device_is_px(dev)) { + if (amdgpu_device_supports_boco(dev)) { dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); @@ -193,7 +193,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ - if (adev->rmmio && amdgpu_device_is_px(dev)) + if (adev->rmmio && amdgpu_device_supports_boco(dev)) pm_runtime_put_noidle(dev->dev); amdgpu_driver_unload_kms(dev); } -- cgit From b97e9d47e549caacea9504822301c34d447c5fcf Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 10:50:24 -0500 Subject: drm/amdgpu: add additional boco checks to runtime suspend/resume (v2) BACO - Bus Active, Chip Off BOCO - Bus Off, Chip Off We will take slightly different paths for boco and baco. 
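The resulting shape of the runtime-suspend path, sketched with error handling omitted (the BACO leg is only a placeholder here; it is filled in by a later patch in this series):

    static int runtime_suspend_sketch(struct drm_device *drm_dev,
                                      struct pci_dev *pdev)
    {
            int ret = amdgpu_device_suspend(drm_dev, false, false);

            if (amdgpu_device_supports_boco(drm_dev)) {
                    /* hybrid laptop: save state and let the platform cut
                     * power to the dGPU (D3cold via ATPX when available) */
                    pci_save_state(pdev);
                    pci_disable_device(pdev);
                    pci_ignore_hotplug(pdev);
                    pci_set_power_state(pdev, amdgpu_is_atpx_hybrid() ?
                                        PCI_D3cold : PCI_D3hot);
            } else {
                    /* BACO-capable desktop boards: handled by a later patch,
                     * which enters BACO instead of touching bus power */
            }
            return ret;
    }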
v2: fold together two consecutive if clauses Reviewed-by: Evan Quan (v1) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 47 ++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 709861b48510..47b51588c762 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1208,18 +1208,21 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) return -EBUSY; } - drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + if (amdgpu_device_supports_boco(drm_dev)) + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); ret = amdgpu_device_suspend(drm_dev, false, false); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_ignore_hotplug(pdev); - if (amdgpu_is_atpx_hybrid()) - pci_set_power_state(pdev, PCI_D3cold); - else if (!amdgpu_has_atpx_dgpu_power_cntl()) - pci_set_power_state(pdev, PCI_D3hot); - drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + if (amdgpu_device_supports_boco(drm_dev)) { + pci_save_state(pdev); + pci_disable_device(pdev); + pci_ignore_hotplug(pdev); + if (amdgpu_is_atpx_hybrid()) + pci_set_power_state(pdev, PCI_D3cold); + else if (!amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D3hot); + drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + } return 0; } @@ -1233,20 +1236,22 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) if (!amdgpu_device_supports_boco(drm_dev)) return -EINVAL; - drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - - if (amdgpu_is_atpx_hybrid() || - !amdgpu_has_atpx_dgpu_power_cntl()) - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; - pci_set_master(pdev); - + if (amdgpu_device_supports_boco(drm_dev)) { + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + + if (amdgpu_is_atpx_hybrid() || + !amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + } ret = amdgpu_device_resume(drm_dev, false, false); drm_kms_helper_poll_enable(drm_dev); - drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + if (amdgpu_device_supports_boco(drm_dev)) + drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; return 0; } -- cgit From 11520f27085bbab7dcb2b5998dec7e7abe3a5bd1 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 28 Oct 2019 15:20:03 -0400 Subject: drm/amdgpu: split swSMU baco_reset into enter and exit BACO - Bus Active, Chip Off So we can use it for power savings rather than just reset. 
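With the split, a full BACO reset is simply the two halves back to back, while runtime pm can park the chip in BACO between them. A rough sketch of the reset pattern (error handling trimmed):

    /* BACO reset: power the chip off and straight back on. */
    static int sketch_baco_reset(struct smu_context *smu)
    {
            int ret;

            ret = smu_baco_enter(smu);     /* chip enters BACO (off) */
            if (ret)
                    return ret;
            return smu_baco_exit(smu);     /* chip comes straight back on */
    }

For runtime pm, the later patches call smu_baco_enter() on runtime suspend and smu_baco_exit() on resume, leaving the chip sitting in BACO in between.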
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 7 ++++++- drivers/gpu/drm/amd/amdgpu/soc15.c | 10 ++++++++-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 20 +++++++++++++++++--- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 3 ++- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 6 ++++-- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 3 ++- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 3 ++- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 9 ++++++++- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 3 ++- 9 files changed, 51 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 8e7c6a4b0018..b0229543e887 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -353,7 +353,12 @@ static int nv_asic_reset(struct amdgpu_device *adev) if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); - ret = smu_baco_reset(smu); + ret = smu_baco_enter(smu); + if (ret) + return ret; + ret = smu_baco_exit(smu); + if (ret) + return ret; } else { if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 36aa400de774..951327f759fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -511,9 +511,15 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev) if (is_support_sw_smu(adev)) { struct smu_context *smu = &adev->smu; + int ret; - if (smu_baco_reset(smu)) - return -EIO; + ret = smu_baco_enter(smu); + if (ret) + return ret; + + ret = smu_baco_exit(smu); + if (ret) + return ret; } else { void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index b73561957d24..36001a4f65d7 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -2456,14 +2456,28 @@ int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state) return 0; } -int smu_baco_reset(struct smu_context *smu) +int smu_baco_enter(struct smu_context *smu) { int ret = 0; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->baco_reset) - ret = smu->ppt_funcs->baco_reset(smu); + if (smu->ppt_funcs->baco_enter) + ret = smu->ppt_funcs->baco_enter(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_baco_exit(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_exit) + ret = smu->ppt_funcs->baco_exit(smu); mutex_unlock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 1af0b1ab6f70..68107de7435c 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -2163,7 +2163,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .baco_is_support= smu_v11_0_baco_is_support, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, - .baco_reset = smu_v11_0_baco_reset, + .baco_enter = smu_v11_0_baco_enter, + .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .override_pcie_parameters = smu_v11_0_override_pcie_parameters, diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 5bac7efcd6ee..ada4a8dc4112 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -546,7 +546,8 @@ struct pptable_funcs { bool (*baco_is_support)(struct smu_context *smu); enum smu_baco_state (*baco_get_state)(struct smu_context *smu); int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state); - int (*baco_reset)(struct smu_context *smu); + int (*baco_enter)(struct smu_context *smu); + int (*baco_exit)(struct smu_context *smu); int (*mode2_reset)(struct smu_context *smu); int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); @@ -628,7 +629,8 @@ bool smu_baco_is_support(struct smu_context *smu); int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state); -int smu_baco_reset(struct smu_context *smu); +int smu_baco_enter(struct smu_context *smu); +int smu_baco_exit(struct smu_context *smu); int smu_mode2_reset(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 606149085683..5a277136f2aa 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -248,7 +248,8 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); -int smu_v11_0_baco_reset(struct smu_context *smu); +int smu_v11_0_baco_enter(struct smu_context *smu); +int smu_v11_0_baco_exit(struct smu_context *smu); int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index bc44bc4e0b9d..f8422462e3b5 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -2109,7 +2109,8 @@ static const struct pptable_funcs navi10_ppt_funcs = { .baco_is_support= smu_v11_0_baco_is_support, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, - .baco_reset = smu_v11_0_baco_reset, + .baco_enter = smu_v11_0_baco_enter, + .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .override_pcie_parameters = smu_v11_0_override_pcie_parameters, diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index f677743cb511..b90eb8172849 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1715,7 +1715,7 @@ out: return ret; } -int smu_v11_0_baco_reset(struct smu_context *smu) +int smu_v11_0_baco_enter(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret = 0; @@ -1733,6 +1733,13 @@ int smu_v11_0_baco_reset(struct smu_context *smu) msleep(10); + return ret; +} + +int smu_v11_0_baco_exit(struct smu_context *smu) +{ + int ret = 0; + ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 0b4892833808..02ede5c8b73a 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ 
-3257,7 +3257,8 @@ static const struct pptable_funcs vega20_ppt_funcs = { .baco_is_support= smu_v11_0_baco_is_support, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, - .baco_reset = smu_v11_0_baco_reset, + .baco_enter = smu_v11_0_baco_enter, + .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .override_pcie_parameters = smu_v11_0_override_pcie_parameters, -- cgit From 361dbd01a1de8bdd6bdf9a879ae23a121b8f7266 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 12:33:09 -0500 Subject: drm/amdgpu: add helpers for baco entry and exit BACO - Bus Active, Chip Off Will be used for runtime pm. Entry will enter the BACO state (chip off). Exit will exit the BACO state (chip on). Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 61 ++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 56ef19751c05..c1e6a5f80c8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1144,6 +1144,8 @@ bool amdgpu_device_supports_boco(struct drm_device *dev); bool amdgpu_device_supports_baco(struct drm_device *dev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +int amdgpu_device_baco_enter(struct drm_device *dev); +int amdgpu_device_baco_exit(struct drm_device *dev); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 635091bad874..612c4cc82d6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4296,3 +4296,64 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) } } +int amdgpu_device_baco_enter(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + + if (!amdgpu_device_supports_baco(adev->ddev)) + return -ENOTSUPP; + + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; + int ret; + + ret = smu_baco_enter(smu); + if (ret) + return ret; + + return 0; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + return 0; + } +} + +int amdgpu_device_baco_exit(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + + if (!amdgpu_device_supports_baco(adev->ddev)) + return -ENOTSUPP; + + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; + int ret; + + ret = smu_baco_exit(smu); + if (ret) + return ret; + + return 0; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + + return 0; + } +} -- cgit From 191343172809aba0047c2eb03249cb704ad65658 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 12:54:12 -0500 Subject: drm/amdgpu: add baco support 
to runtime suspend/resume BACO - Bus Active, Chip Off This adds the necessary support to the runtime suspend and resume functions to handle boards that support baco. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 47b51588c762..43070973d6a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1155,7 +1155,8 @@ static int amdgpu_pmops_resume(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); /* GPU comes up enabled by the bios on resume */ - if (amdgpu_device_supports_boco(drm_dev)) { + if (amdgpu_device_supports_boco(drm_dev) || + amdgpu_device_supports_baco(drm_dev)) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -1222,6 +1223,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) else if (!amdgpu_has_atpx_dgpu_power_cntl()) pci_set_power_state(pdev, PCI_D3hot); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + } else if (amdgpu_device_supports_baco(drm_dev)) { + amdgpu_device_baco_enter(drm_dev); } return 0; @@ -1247,6 +1250,8 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) if (ret) return ret; pci_set_master(pdev); + } else if (amdgpu_device_supports_baco(drm_dev)) { + amdgpu_device_baco_exit(drm_dev); } ret = amdgpu_device_resume(drm_dev, false, false); drm_kms_helper_poll_enable(drm_dev); -- cgit From 6ae6c7d404ec3e7595c2c6bee8df211a34da5c64 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 7 Nov 2019 18:13:14 -0500 Subject: drm/amdgpu: start to disentangle boco from runtime pm BACO - Bus Active, Chip Off BOCO - Bus Off, Chip Off We originally only supported runtime pm on PX/HG laptops so most of the runtime pm code looks for this. Add a new flag to check for runtime pm enablement and use this rather than checking for PX/HG. 
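The idea is to make the policy decision exactly once at load time and have every pm callback test the cached flag, roughly (two separate call sites shown as fragments):

    /* 1) at load time (amdgpu_driver_load_kms), decide the policy once;
     *    at this point only PX/HG boards qualify */
    if ((amdgpu_runtime_pm != 0) && (flags & AMD_IS_PX))
            adev->runpm = true;

    /* 2) in each runtime pm callback, consult the flag instead of
     *    re-deriving the PX/HG state */
    if (!adev->runpm) {
            pm_runtime_forbid(dev);
            return -EBUSY;
    }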
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 9 ++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 13 ++++++++----- 3 files changed, 16 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c1e6a5f80c8a..dd961f0a5a75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -987,6 +987,8 @@ struct amdgpu_device { /* device pstate */ int pstate; + /* enable runtime pm on the device */ + bool runpm; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 43070973d6a6..348eb2f71e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1202,9 +1202,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_dev->dev_private; int ret; - if (!amdgpu_device_supports_boco(drm_dev)) { + if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } @@ -1234,9 +1235,10 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_dev->dev_private; int ret; - if (!amdgpu_device_supports_boco(drm_dev)) + if (!adev->runpm) return -EINVAL; if (amdgpu_device_supports_boco(drm_dev)) { @@ -1263,9 +1265,10 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_dev->dev_private; struct drm_crtc *crtc; - if (!amdgpu_device_supports_boco(drm_dev)) { + if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a1628e95573f..402040a2c36f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (amdgpu_sriov_vf(adev)) amdgpu_virt_request_full_gpu(adev, false); - if (amdgpu_device_supports_boco(dev)) { + if (adev->runpm) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); } @@ -150,14 +150,17 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } dev->dev_private = (void *)adev; - if ((amdgpu_runtime_pm != 0) && - amdgpu_has_atpx() && + if (amdgpu_has_atpx() && (amdgpu_is_atpx_hybrid() || amdgpu_has_atpx_dgpu_power_cntl()) && ((flags & AMD_IS_APU) == 0) && !pci_is_thunderbolt_attached(dev->pdev)) flags |= AMD_IS_PX; + if ((amdgpu_runtime_pm != 0) && + (flags & AMD_IS_PX)) + adev->runpm = true; + /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must @@ -180,7 +183,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - if (amdgpu_device_supports_boco(dev)) { + if (adev->runpm) { dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); @@ -193,7 +196,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, 
unsigned long flags) out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ - if (adev->rmmio && amdgpu_device_supports_boco(dev)) + if (adev->rmmio && adev->runpm) pm_runtime_put_noidle(dev->dev); amdgpu_driver_unload_kms(dev); } -- cgit From 3840c5bcc2456381ca53f3f9604915aa36249faf Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 13:25:37 -0500 Subject: drm/amdgpu: disentangle runtime pm and vga_switcheroo Originally we only supported runtime pm on PX/HG laptops so vga_switcheroo and runtime pm are sort of entangled. Attempt to logically separate them. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 612c4cc82d6c..d472526d97b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2743,7 +2743,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags) { int r, i; - bool runtime = false; + bool boco = false; u32 max_MBps; adev->shutdown = false; @@ -2914,11 +2914,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); if (amdgpu_device_supports_boco(ddev)) - runtime = true; - if (!pci_is_thunderbolt_attached(adev->pdev)) + boco = true; + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + !pci_is_thunderbolt_attached(adev->pdev)) vga_switcheroo_register_client(adev->pdev, - &amdgpu_switcheroo_ops, runtime); - if (runtime) + &amdgpu_switcheroo_ops, boco); + if (boco) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); if (amdgpu_emu_mode == 1) { @@ -3102,7 +3105,7 @@ fence_driver_init: failed: amdgpu_vf_error_trans_all(adev); - if (runtime) + if (boco) vga_switcheroo_fini_domain_pm_ops(adev->dev); return r; @@ -3150,9 +3153,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev) kfree(adev->bios); adev->bios = NULL; - if (!pci_is_thunderbolt_attached(adev->pdev)) + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + !pci_is_thunderbolt_attached(adev->pdev)) vga_switcheroo_unregister_client(adev->pdev); - if (adev->flags & AMD_IS_PX) + if (amdgpu_device_supports_boco(adev->ddev)) vga_switcheroo_fini_domain_pm_ops(adev->dev); vga_client_register(adev->pdev, NULL, NULL, NULL); if (adev->rio_mem) -- cgit From 72f058b7237ede2be2d98c70bdabbe7c3e587ae9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 4 Oct 2019 13:47:39 -0500 Subject: drm/amdgpu: enable runtime pm on BACO capable boards if runpm=1 BACO - Bus Active, Chip Off Everything is in place now. Not enabled by default yet. You still have to specify runpm=1. 
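In practice that makes BACO-based runtime pm opt-in at this point: loading the driver with the module parameter set explicitly, e.g. "modprobe amdgpu runpm=1" or "amdgpu.runpm=1" on the kernel command line, enables it on BACO-capable boards, while the default auto setting keeps enabling runtime pm only on BOCO/PX systems.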
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 402040a2c36f..f7b9296fb1c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -157,10 +157,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) !pci_is_thunderbolt_attached(dev->pdev)) flags |= AMD_IS_PX; - if ((amdgpu_runtime_pm != 0) && - (flags & AMD_IS_PX)) - adev->runpm = true; - /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must @@ -173,6 +169,13 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) goto out; } + if (amdgpu_device_supports_boco(dev) && + (amdgpu_runtime_pm != 0)) /* enable runpm by default */ + adev->runpm = true; + else if (amdgpu_device_supports_baco(dev) && + (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */ + adev->runpm = true; + /* Call ACPI methods: require modeset init * but failure is not fatal */ -- cgit From f43ef951f6c0ddd71787b28de4e5d96b40c832ca Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Mon, 18 Nov 2019 13:28:46 -0600 Subject: drm/amdgpu: add flag to indicate amdgpu vm context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flag added to indicate if the amdgpu vm context is used for compute or graphics. Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 598c24505c73..63f6e46bd642 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2708,6 +2708,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error_free_direct; vm->pte_support_ats = false; + vm->is_compute_context = false; if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & @@ -2893,6 +2894,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs = &amdgpu_vm_sdma_funcs; dma_fence_put(vm->last_update); vm->last_update = NULL; + vm->is_compute_context = true; if (vm->pasid) { unsigned long flags; @@ -2947,6 +2949,7 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } vm->pasid = 0; + vm->is_compute_context = false; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4dbbe1b6b413..67f946b0c515 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -298,6 +298,8 @@ struct amdgpu_vm { struct ttm_lru_bulk_move lru_bulk_move; /* mark whether can do the bulk move */ bool bulk_moveable; + /* Flag to indicate if VM is used for compute */ + bool is_compute_context; }; struct amdgpu_vm_manager { -- cgit From b4672c8a84beba6d04dc564b02e439590b5c93f7 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Mon, 18 Nov 2019 15:33:07 -0600 Subject: amd/amdgpu: force to trigger a no-retry-fault after a 
retry-fault MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only for the debugger use case. [why] Avoid endless translation retries, after an invalid address access has been issued to the GPU. Instead, the trap handler is forced to enter by generating a no-retry-fault. A s_trap instruction is inserted in the debugger case to let the wave to enter trap handler to save context. [how] Intentionally using an invalid flag combination (F and P set at the same time) to trigger a no-retry-fault, after a retry-fault happens. This is only valid under compute context. Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 63f6e46bd642..f20b572d2438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3197,11 +3197,20 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | AMDGPU_PTE_SYSTEM; - if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { + if (vm->is_compute_context) { + /* Intentionally setting invalid PTE flag + * combination to force a no-retry-fault + */ + flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | + AMDGPU_PTE_TF; + value = 0; + + } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { /* Redirect the access to the dummy page */ value = adev->dummy_page_addr; flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE; + } else { /* Let the hw retry silently on the PTE */ value = 0; -- cgit From 387d40fd6fb64819ae67343a4eb3776ded7932d6 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 14 Nov 2019 16:56:08 +0800 Subject: drm/amdgpu/gfx10: explicitly wait for cp idle after halt/unhalt 50us is not enough to wait for cp ready after gpu reset on some navi asics. Signed-off-by: Xiaojie Yuan Suggested-by: Jack Xiao Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index b1280120845f..f439dc348fe2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -2400,7 +2400,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) return 0; } -static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) +static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { int i; u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); @@ -2413,7 +2413,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) adev->gfx.gfx_ring[i].sched.ready = false; } WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); - udelay(50); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("failed to %s cp gfx\n", enable ? 
"unhalt" : "halt"); + + return 0; } static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) -- cgit From d98a07aea6528e7bc49c01dbad0ed3379d46ceee Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 22 Nov 2019 14:21:15 -0500 Subject: drm/amdgpu/gfx10: fix out-of-bound mqd_backup array access Fixes: 0bb419c76b3150 ("drm/amdgpu/gfx10: fix mqd backup/restore for gfx rings (v2)") Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f063a5c7bd8e..e00b46180d2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -454,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) } ring = &adev->gfx.kiq.ring; - if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) - kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS - 1]); kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, -- cgit From 46f719696ee62a7637116791bb4f571d030569cd Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Tue, 19 Nov 2019 16:25:25 +0800 Subject: drm/amdgpu: define soc15_ras_field_entry for reuse The struct soc15_ras_field_entry will be reused by other IPs, such as mmhub and gc v2: rename ras_subblock_regs to gc_ras_fields_vg20, because the future asic maybe have a different table. Signed-off-by: Dennis Li Reviewed-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 34 +++++++++++----------------------- drivers/gpu/drm/amd/amdgpu/soc15.h | 12 ++++++++++++ 2 files changed, 23 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3e4ac2f06c3b..308d5ccbf4e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -131,18 +131,6 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 -struct ras_gfx_subblock_reg { - const char *name; - uint32_t hwip; - uint32_t inst; - uint32_t seg; - uint32_t reg_offset; - uint32_t sec_count_mask; - uint32_t sec_count_shift; - uint32_t ded_count_mask; - uint32_t ded_count_shift; -}; - enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -5484,7 +5472,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, } -static const struct ras_gfx_subblock_reg ras_subblock_regs[] = { +static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = { { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) @@ -6143,29 +6131,29 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg, uint32_t i; uint32_t sec_cnt, ded_cnt; - for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) { - if(ras_subblock_regs[i].reg_offset != reg->reg_offset || - ras_subblock_regs[i].seg != reg->seg || - ras_subblock_regs[i].inst != reg->inst) + for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) { + if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset || + gc_ras_fields_vg20[i].seg != reg->seg || + gc_ras_fields_vg20[i].inst != reg->inst) continue; sec_cnt = (value & - ras_subblock_regs[i].sec_count_mask) >> - 
ras_subblock_regs[i].sec_count_shift; + gc_ras_fields_vg20[i].sec_count_mask) >> + gc_ras_fields_vg20[i].sec_count_shift; if (sec_cnt) { DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", - ras_subblock_regs[i].name, + gc_ras_fields_vg20[i].name, se_id, inst_id, sec_cnt); *sec_count += sec_cnt; } ded_cnt = (value & - ras_subblock_regs[i].ded_count_mask) >> - ras_subblock_regs[i].ded_count_shift; + gc_ras_fields_vg20[i].ded_count_mask) >> + gc_ras_fields_vg20[i].ded_count_shift; if (ded_cnt) { DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", - ras_subblock_regs[i].name, + gc_ras_fields_vg20[i].name, se_id, inst_id, ded_cnt); *ded_count += ded_cnt; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 9af6c6ffbfa2..344280b869c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -60,6 +60,18 @@ struct soc15_allowed_register_entry { bool grbm_indexed; }; +struct soc15_ras_field_entry { + const char *name; + uint32_t hwip; + uint32_t inst; + uint32_t seg; + uint32_t reg_offset; + uint32_t sec_count_mask; + uint32_t sec_count_shift; + uint32_t ded_count_mask; + uint32_t ded_count_shift; +}; + #define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg #define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset) -- cgit From 8781e5df119fbce754bf027c691208f677dc07c8 Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Tue, 19 Nov 2019 16:02:28 +0800 Subject: drm/amdgpu: refine query function of mmhub EDC counter in vg20 Add codes to print the detail EDC info for the subblock of mmhub v2: Move the EDC_CNT registers' defintion from mmhub_9_4 header files to mmhub_1_0 ones. Add mmhub_v1_0_ prefix for the local static variable and function. 
v3: squash in DC fix Signed-off-by: Dennis Li Reviewed-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 232 ++++++++++++++----- .../drm/amd/display/dc/dce120/dce120_resource.c | 4 +- .../amd/include/asic_reg/mmhub/mmhub_1_0_offset.h | 16 ++ .../amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h | 122 ++++++++++ .../include/asic_reg/mmhub/mmhub_9_4_0_offset.h | 53 ----- .../include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h | 257 --------------------- 6 files changed, 320 insertions(+), 364 deletions(-) delete mode 100644 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h delete mode 100644 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 6965e1e6fa9e..d7575ac27038 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -27,17 +27,13 @@ #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" #include "mmhub/mmhub_1_0_default.h" -#include "mmhub/mmhub_9_4_0_offset.h" #include "vega10_enum.h" - +#include "soc15.h" #include "soc15_common.h" #define mmDAGB0_CNTL_MISC2_RV 0x008f #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0 -#define EA_EDC_CNT_MASK 0x3 -#define EA_EDC_CNT_SHIFT 0x2 - u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) { u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE); @@ -562,59 +558,191 @@ void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) *flags |= AMD_CG_SUPPORT_MC_LS; } +static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = { + { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + 
SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + } +}; + +static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0}, +}; + +static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t value, uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) { + if(mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset) + continue; + + sec_cnt = (value & + mmhub_v1_0_ras_fields[i].sec_count_mask) >> + mmhub_v1_0_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("MMHUB SubBlock %s, SEC %d\n", + mmhub_v1_0_ras_fields[i].name, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + mmhub_v1_0_ras_fields[i].ded_count_mask) >> + mmhub_v1_0_ras_fields[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("MMHUB SubBlock %s, DED %d\n", + mmhub_v1_0_ras_fields[i].name, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - int i; - uint32_t ea0_edc_cnt, ea0_edc_cnt2; - uint32_t ea1_edc_cnt, ea1_edc_cnt2; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - /* EDC CNT will be cleared automatically after read */ - ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT_VG20); - ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20); - ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT_VG20); - ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20); - - /* error count of each error type is recorded by 2 bits, - * ce and ue count in EDC_CNT - */ - for (i = 0; i < 5; i++) { - err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; - err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; - } - /* successive ue count in EDC_CNT */ - for (i = 0; i < 5; i++) { - err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i; + uint32_t reg_value; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); + if (reg_value) + mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i], + reg_value, &sec_count, &ded_count); } - /* ce and ue count in EDC_CNT2 */ - for (i = 0; i < 3; i++) { - err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - } - /* successive ue count in EDC_CNT2 */ - for (i = 0; i < 6; i++) { - err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ue_count += 
(ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - } + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; } const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 305bb0841563..53ab88ef71f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -63,8 +63,8 @@ #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" -#include "mmhub/mmhub_9_4_0_offset.h" -#include "mmhub/mmhub_9_4_0_sh_mask.h" +#include "mmhub/mmhub_1_0_offset.h" +#include "mmhub/mmhub_1_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h index 352ffae7a7ca..2c3ce243861a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h @@ -1964,4 +1964,20 @@ #define mmATC_L2_PERFCOUNTER_RSLT_CNTL 0x084a #define mmATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +/* MMEA */ +#define mmMMEA0_EDC_CNT_VG20 0x0206 +#define mmMMEA0_EDC_CNT_VG20_BASE_IDX 0 +#define mmMMEA0_EDC_CNT2_VG20 0x0207 +#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX 0 +#define mmMMEA1_EDC_CNT_VG20 0x0346 +#define mmMMEA1_EDC_CNT_VG20_BASE_IDX 0 +#define mmMMEA1_EDC_CNT2_VG20 0x0347 +#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX 0 + +// addressBlock: mmhub_utcl2_vmsharedpfdec +// base address: 0x6a040 +#define mmMC_VM_XGMI_LFB_CNTL 0x0823 +#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0 +#define mmMC_VM_XGMI_LFB_SIZE 0x0824 +#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0 #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h index 34278ef2aa1b..198f5f93ed1a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h @@ -10124,4 +10124,126 @@ #define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA0_EDC_CNT +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define 
MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA0_EDC_CNT2 +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +//MMEA1_EDC_CNT +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA1_EDC_CNT2 +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L + +// addressBlock: mmhub_utcl2_vmsharedpfdec +//MC_VM_XGMI_LFB_CNTL +#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 +#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L +//MC_VM_XGMI_LFB_SIZE +#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 +#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h deleted file mode 100644 index f2ae3a58949e..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (C) 2018 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN - * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#ifndef _mmhub_9_4_0_OFFSET_HEADER -#define _mmhub_9_4_0_OFFSET_HEADER - -/* MMEA */ -#define mmMMEA0_SDP_ARB_FINAL_VG20 0x01ee -#define mmMMEA0_SDP_ARB_FINAL_VG20_BASE_IDX 0 -#define mmMMEA0_EDC_CNT_VG20 0x0206 -#define mmMMEA0_EDC_CNT_VG20_BASE_IDX 0 -#define mmMMEA0_EDC_CNT2_VG20 0x0207 -#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX 0 -#define mmMMEA0_EDC_MODE_VG20 0x0210 -#define mmMMEA0_EDC_MODE_VG20_BASE_IDX 0 -#define mmMMEA0_ERR_STATUS_VG20 0x0211 -#define mmMMEA0_ERR_STATUS_VG20_BASE_IDX 0 -#define mmMMEA1_SDP_ARB_FINAL_VG20 0x032e -#define mmMMEA1_SDP_ARB_FINAL_VG20_BASE_IDX 0 -#define mmMMEA1_EDC_CNT_VG20 0x0346 -#define mmMMEA1_EDC_CNT_VG20_BASE_IDX 0 -#define mmMMEA1_EDC_CNT2_VG20 0x0347 -#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX 0 -#define mmMMEA1_EDC_MODE_VG20 0x0350 -#define mmMMEA1_EDC_MODE_VG20_BASE_IDX 0 -#define mmMMEA1_ERR_STATUS_VG20 0x0351 -#define mmMMEA1_ERR_STATUS_VG20_BASE_IDX 0 - -// addressBlock: mmhub_utcl2_vmsharedpfdec -// base address: 0x6a040 -#define mmMC_VM_XGMI_LFB_CNTL 0x0823 -#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0 -#define mmMC_VM_XGMI_LFB_SIZE 0x0824 -#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0 - -#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h deleted file mode 100644 index c24259ed12a1..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (C) 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN - * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#ifndef _mmhub_9_4_0_SH_MASK_HEADER -#define _mmhub_9_4_0_SH_MASK_HEADER - -//MMEA0_SDP_ARB_FINAL -#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 -#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 -#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa -#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 -#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 -#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a -#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL -#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L -#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L -#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L -#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L -#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L -//MMEA0_EDC_CNT -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc -#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe -#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 -#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 -#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 -#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 -#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 -#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a -#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L -#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L -#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L -#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L -#define 
MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L -#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L -#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L -#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L -#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L -//MMEA0_EDC_CNT2 -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc -#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe -#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 -#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 -#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 -#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L -#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L -#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L -#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L -#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L -#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L -//MMEA0_EDC_MODE -#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 -#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11 -#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14 -#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d -#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f -#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L -#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L -#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L -#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L -#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L -//MMEA0_ERR_STATUS -#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 -#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa -#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb -#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc -#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd -#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL -#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L -#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L -#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L -#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L -//MMEA1_SDP_ARB_FINAL -#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 -#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 -#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa -#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 -#define 
MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 -#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 -#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a -#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL -#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L -#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L -#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L -#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L -#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L -//MMEA1_EDC_CNT -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc -#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe -#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 -#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 -#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 -#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 -#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 -#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a -#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L -#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L -#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L -#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L -#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L -#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L -#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L -#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L -#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L -//MMEA1_EDC_CNT2 -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define 
MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc -#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe -#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 -#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 -#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 -#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L -#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L -#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L -#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L -#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L -#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L -//MMEA1_EDC_MODE -#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 -#define MMEA1_EDC_MODE__GATE_FUE__SHIFT 0x11 -#define MMEA1_EDC_MODE__DED_MODE__SHIFT 0x14 -#define MMEA1_EDC_MODE__PROP_FED__SHIFT 0x1d -#define MMEA1_EDC_MODE__BYPASS__SHIFT 0x1f -#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L -#define MMEA1_EDC_MODE__GATE_FUE_MASK 0x00020000L -#define MMEA1_EDC_MODE__DED_MODE_MASK 0x00300000L -#define MMEA1_EDC_MODE__PROP_FED_MASK 0x20000000L -#define MMEA1_EDC_MODE__BYPASS_MASK 0x80000000L -//MMEA1_ERR_STATUS -#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 -#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa -#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb -#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc -#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT 0xd -#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL -#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L -#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L -#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L -#define MMEA1_ERR_STATUS__FUE_FLAG_MASK 0x00002000L - -// addressBlock: mmhub_utcl2_vmsharedpfdec -//MC_VM_XGMI_LFB_CNTL -#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 -#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 -#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L -#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L -//MC_VM_XGMI_LFB_SIZE -#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 -#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL - -#endif -- cgit From f6c3623b7b2f378f6210a6a5c898d1b36c722e6f Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Tue, 19 Nov 2019 14:02:57 +0800 Subject: drm/amdgpu: implement querying ras error count for mmhub9.4 Get mmhub error counter by accessing EDC_CNT registers. 
v2: Add mmhub_v9_4_ prefix for local static variable and function Signed-off-by: Dennis Li Reviewed-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 + drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 253 +++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 2 + 3 files changed, 257 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index ee615d050837..5f4a6cdf83a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -658,6 +658,9 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) case CHIP_VEGA20: adev->mmhub.funcs = &mmhub_v1_0_funcs; break; + case CHIP_ARCTURUS: + adev->mmhub.funcs = &mmhub_v9_4_funcs; + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 2c5adfe803a2..6fe5c39e5581 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -21,6 +21,7 @@ * */ #include "amdgpu.h" +#include "amdgpu_ras.h" #include "mmhub_v9_4.h" #include "mmhub/mmhub_9_4_1_offset.h" @@ -29,7 +30,7 @@ #include "athub/athub_1_0_offset.h" #include "athub/athub_1_0_sh_mask.h" #include "vega10_enum.h" - +#include "soc15.h" #include "soc15_common.h" #define MMHUB_NUM_INSTANCES 2 @@ -651,3 +652,253 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) *flags |= AMD_CG_SUPPORT_MC_LS; } + +static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { + { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, 
+ { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, 
+ { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + } +}; + +static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0}, +}; + +static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t value, uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) { + if(mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset) + continue; + + sec_cnt = (value & + mmhub_v9_4_ras_fields[i].sec_count_mask) >> + mmhub_v9_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("MMHUB SubBlock %s, SEC %d\n", + mmhub_v9_4_ras_fields[i].name, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + mmhub_v9_4_ras_fields[i].ded_count_mask) >> + mmhub_v9_4_ras_fields[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("MMHUB SubBlock %s, DED %d\n", + mmhub_v9_4_ras_fields[i].name, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i; + uint32_t reg_value; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); + if (reg_value) + mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i], + reg_value, &sec_count, &ded_count); + } + + err_data->ce_count += 
sec_count; + err_data->ue_count += ded_count; +} + +const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, + .query_ras_error_count = mmhub_v9_4_query_ras_error_count, +}; \ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h index d435cfcec1a8..354a4b7e875b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h @@ -23,6 +23,8 @@ #ifndef __MMHUB_V9_4_H__ #define __MMHUB_V9_4_H__ +extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs; + u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev); int mmhub_v9_4_gart_enable(struct amdgpu_device *adev); void mmhub_v9_4_gart_disable(struct amdgpu_device *adev); -- cgit From 6e04b2248dfd9b29d4a7b8cb55b491ec2a380298 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Wed, 20 Nov 2019 16:32:46 +0000 Subject: drm/amdgpu: Update Arcturus golden registers Signed-off-by: Jay Cornwall Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 308d5ccbf4e3..f6e39b332762 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -692,6 +692,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00), }; static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = -- cgit From d7c0b0477bf32f85ca43a1a306092deded50a0b7 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Nov 2019 23:57:37 -0500 Subject: drm/amdkfd: Delete KFD_MQD_TYPE_COMPUTE It is the same as KFD_MQD_TYPE_CP, so delete it. As a result, we will have one less mqd mananger per device. 
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 3 +-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 1 - drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 1 - drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 3 +-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 +-- 6 files changed, 4 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b42f34ef2b5c..f7f6df40875e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1595,7 +1595,7 @@ static int get_wave_state(struct device_queue_manager *dqm, goto dqm_unlock; } - mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE]; + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; if (!mqd_mgr->get_wave_state) { r = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 28876aceb14b..b08694ec65d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -374,7 +374,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; mqd->free_mqd = free_mqd; @@ -442,7 +441,7 @@ struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, mqd = mqd_manager_init_cik(type, dev); if (!mqd) return NULL; - if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE)) + if (type == KFD_MQD_TYPE_CP) mqd->update_mqd = update_mqd_hawaii; return mqd; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 579c5ffcfa79..5a0e30441be8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -401,7 +401,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: pr_debug("%s@%i\n", __func__, __LINE__); mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 22a819c888d8..bdbcea22ad12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -444,7 +444,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; mqd->free_mqd = free_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 7d144f56f421..c9e1151b5a57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -425,7 +425,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; mqd->free_mqd = free_mqd; @@ -494,7 +493,7 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, mqd = mqd_manager_init_vi(type, dev); if (!mqd) return NULL; - if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE)) + if 
(type == KFD_MQD_TYPE_CP) mqd->update_mqd = update_mqd_tonga; return mqd; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 8ed691fb345a..fc61b5ec068e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -508,8 +508,7 @@ struct queue { * Please read the kfd_mqd_manager.h description. */ enum KFD_MQD_TYPE { - KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */ - KFD_MQD_TYPE_HIQ, /* for hiq */ + KFD_MQD_TYPE_HIQ = 0, /* for hiq */ KFD_MQD_TYPE_CP, /* for cp queues and diq */ KFD_MQD_TYPE_SDMA, /* for sdma queues */ KFD_MQD_TYPE_DIQ, /* for diq */ -- cgit From 7633c5e0bd190393362de46aea8ea34f8732a20e Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Sat, 9 Nov 2019 00:47:31 -0500 Subject: drm/amdkfd: DIQ should not use HIQ way to allocate memory In the mqd_diq_sdma buffer, there should be only one HIQ mqd. All DIQs should be allocated somewhere else using the regular way. Signed-off-by: Yong Zhao Reviewed-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index b08694ec65d7..19f0fe547c57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -400,7 +400,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, #endif break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 5a0e30441be8..8d21325b5cbb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -432,7 +432,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, pr_debug("%s@%i\n", __func__, __LINE__); break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index bdbcea22ad12..df77d67ec9aa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -471,7 +471,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, #endif break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index c9e1151b5a57..3b6b5671964c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -452,7 +452,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, #endif break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = 
load_mqd; -- cgit From 562b49fcd02d1b06f2706c31992d2fbfc011a634 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 20 Nov 2019 14:20:32 -0500 Subject: drm/amdgpu: simplify runtime suspend In the standard _PR3 case, the pci core handles the pci state. The driver only needs to handle it in the legacy ATPX case. This may fix issues with runtime suspend/resume on certain hybrid graphics laptops. Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 35 +++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 348eb2f71e88..459bca6514e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1216,13 +1216,17 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) ret = amdgpu_device_suspend(drm_dev, false, false); if (amdgpu_device_supports_boco(drm_dev)) { - pci_save_state(pdev); - pci_disable_device(pdev); - pci_ignore_hotplug(pdev); - if (amdgpu_is_atpx_hybrid()) + /* Only need to handle PCI state in the driver for ATPX + * PCI core handles it for _PR3. + */ + if (amdgpu_is_atpx_hybrid()) { + pci_ignore_hotplug(pdev); + } else { + pci_save_state(pdev); + pci_disable_device(pdev); + pci_ignore_hotplug(pdev); pci_set_power_state(pdev, PCI_D3cold); - else if (!amdgpu_has_atpx_dgpu_power_cntl()) - pci_set_power_state(pdev, PCI_D3hot); + } drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; } else if (amdgpu_device_supports_baco(drm_dev)) { amdgpu_device_baco_enter(drm_dev); @@ -1244,14 +1248,19 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) if (amdgpu_device_supports_boco(drm_dev)) { drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - if (amdgpu_is_atpx_hybrid() || - !amdgpu_has_atpx_dgpu_power_cntl()) + /* Only need to handle PCI state in the driver for ATPX + * PCI core handles it for _PR3. + */ + if (amdgpu_is_atpx_hybrid()) { + pci_set_master(pdev); + } else { pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; - pci_set_master(pdev); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + } } else if (amdgpu_device_supports_baco(drm_dev)) { amdgpu_device_baco_exit(drm_dev); } -- cgit From 2e77541bd17b635f9db834192edff373ac4d5ee3 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 21 Nov 2019 16:54:01 +0000 Subject: drm/amdgpu: remove redundant assignment to pointer write_frame The pointer write_frame is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. 
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 2a8a08aa6eaf..c02f9ffe5c6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1728,7 +1728,7 @@ int psp_ring_cmd_submit(struct psp_context *psp, int index) { unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; + struct psp_gfx_rb_frame *write_frame; struct psp_ring *ring = &psp->km_ring; struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + -- cgit From 6a93b58e5fe233dae2fb98ac86610afdb6745da8 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 21 Nov 2019 14:54:03 +1100 Subject: merge fix for "ftrace: Rework event_create_dir()" Reviewed-by: Kevin Wang Signed-off-by: Stephen Rothwell Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index f940526c5889..63e734a125fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -473,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence), TP_ARGS(sched_job, fence), TP_STRUCT__entry( - __string(ring, sched_job->base.sched->name); + __string(ring, sched_job->base.sched->name) __field(uint64_t, id) __field(struct dma_fence *, fence) __field(uint64_t, ctx) -- cgit From c25edaaf75af4e291e63f58f17b5f5b499dd6e38 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 20 Nov 2019 14:02:22 +0800 Subject: drm/amdgpu/gfx10: re-init clear state buffer after gpu reset This patch fixes 2nd baco reset failure with gfxoff enabled on navi1x. clear state buffer (resides in vram) is corrupted after 1st baco reset, upon gfxoff exit, CPF gets garbage header in CSIB and hangs. 
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 43 +++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f439dc348fe2..ca5f0e7ea1ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); } -static void gfx_v10_0_init_csb(struct amdgpu_device *adev) +static int gfx_v10_0_init_csb(struct amdgpu_device *adev) { + int r; + + if (adev->in_gpu_reset) { + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (r) + return r; + + r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, + (void **)&adev->gfx.rlc.cs_ptr); + if (!r) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, + adev->gfx.rlc.cs_ptr); + amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); + } + + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + if (r) + return r; + } + /* csib */ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + + return 0; } -static void gfx_v10_0_init_pg(struct amdgpu_device *adev) +static int gfx_v10_0_init_pg(struct amdgpu_device *adev) { int i; + int r; - gfx_v10_0_init_csb(adev); + r = gfx_v10_0_init_csb(adev); + if (r) + return r; for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); /* TODO: init power gating */ - return; + return 0; } void gfx_v10_0_rlc_stop(struct amdgpu_device *adev) @@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) r = gfx_v10_0_wait_for_rlc_autoload_complete(adev); if (r) return r; - gfx_v10_0_init_pg(adev); + + r = gfx_v10_0_init_pg(adev); + if (r) + return r; /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); @@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) return r; } - gfx_v10_0_init_pg(adev); + r = gfx_v10_0_init_pg(adev); + if (r) + return r; + adev->gfx.rlc.funcs->start(adev); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { -- cgit From edc2176d516294433425acade06e4a7adc83f038 Mon Sep 17 00:00:00 2001 From: Jack Zhang Date: Thu, 21 Nov 2019 13:59:28 +0800 Subject: drm/amd/amdgpu/sriov temporarily skip ras,dtm,hdcp for arcturus VF Temporarily skip ras,dtm,hdcp initialize and terminate for arcturus VF Currently the three features haven't been enabled at SRIOV, it would trigger guest driver load fail with the bare-metal path of the three features. 
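The change itself is the same guard repeated at the top of the six affected initialize/terminate entry points (shown once here; the hunks below carry it verbatim):

    /* TODO: bypass the initialize/terminate in sriov for now */
    if (amdgpu_sriov_vf(psp->adev))
            return 0;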
Signed-off-by: Jack Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 36 +++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c02f9ffe5c6b..b2cb9e5b1087 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -758,6 +758,12 @@ static int psp_ras_terminate(struct psp_context *psp) { int ret; + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->ras.ras_initialized) return 0; @@ -779,6 +785,12 @@ static int psp_ras_initialize(struct psp_context *psp) { int ret; + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->adev->psp.ta_ras_ucode_size || !psp->adev->psp.ta_ras_start_addr) { dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n"); @@ -874,6 +886,12 @@ static int psp_hdcp_initialize(struct psp_context *psp) { int ret; + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->adev->psp.ta_hdcp_ucode_size || !psp->adev->psp.ta_hdcp_start_addr) { dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n"); @@ -962,6 +980,12 @@ static int psp_hdcp_terminate(struct psp_context *psp) { int ret; + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->hdcp_context.hdcp_initialized) return 0; @@ -1053,6 +1077,12 @@ static int psp_dtm_initialize(struct psp_context *psp) { int ret; + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->adev->psp.ta_dtm_ucode_size || !psp->adev->psp.ta_dtm_start_addr) { dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n"); @@ -1111,6 +1141,12 @@ static int psp_dtm_terminate(struct psp_context *psp) { int ret; + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->dtm_context.dtm_initialized) return 0; -- cgit From c348ad46b0e9503fb55ca84ea7427d736181223f Mon Sep 17 00:00:00 2001 From: Jack Zhang Date: Thu, 21 Nov 2019 14:09:08 +0800 Subject: drm/amd/amdgpu/sriov skip RLCG s/r list for arcturus VF. After rlcg fw 2.1, kmd driver starts to load extra fw for LIST_CNTL,GPM_MEM,SRM_MEM. We needs to skip the three fw because all rlcg related fw have been loaded by host driver. Guest driver would load the three fw fail without this change. 
Signed-off-by: Jack Zhang Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index b2cb9e5b1087..96a6b00eeede 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1470,7 +1470,10 @@ out: || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) /*skip ucode loading in SRIOV VF */ continue; -- cgit From dab5ef2722cde781ff3f6b3542d673ed174359b8 Mon Sep 17 00:00:00 2001 From: changzhu Date: Tue, 19 Nov 2019 10:18:39 +0800 Subject: drm/amdgpu: initialize vm_inv_eng0_sem for gfxhub and mmhub MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SW must acquire/release one of the vm_invalidate_eng*_sem around the invalidation req/ack. Through this way,it can avoid losing invalidate acknowledge state across power-gating off cycle. To use vm_invalidate_eng*_sem, it needs to initialize vm_invalidate_eng*_sem firstly. Signed-off-by: changzhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1 + drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 4 ++++ 6 files changed, 13 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 406736a1bd3d..b499a3de8bb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -77,6 +77,7 @@ struct amdgpu_gmc_fault { struct amdgpu_vmhub { uint32_t ctx0_ptb_addr_lo32; uint32_t ctx0_ptb_addr_hi32; + uint32_t vm_inv_eng0_sem; uint32_t vm_inv_eng0_req; uint32_t vm_inv_eng0_ack; uint32_t vm_context0_cntl; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 9ec4297e61e5..e91bd7945777 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -367,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index b4f32d853ca1..b70c7b483c24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -356,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index d7575ac27038..adfd8a6171eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -416,6 +416,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 945533634711..a7cb185d639a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -348,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 6fe5c39e5581..753eea25b569 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -505,6 +505,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev) SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_SEM) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; hub[i]->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_INVALIDATE_ENG0_REQ) + -- cgit From 4ed8a03740d0ce092563c8fcb76d2c28da4675cd Mon Sep 17 00:00:00 2001 From: changzhu Date: Tue, 19 Nov 2019 11:13:29 +0800 Subject: drm/amdgpu: invalidate mmhub semaphore workaround in gmc9/gmc10 It may lose gpuvm invalidate acknowldege state across power-gating off cycle. To avoid this issue in gmc9/gmc10 invalidation, add semaphore acquire before invalidation and semaphore release after invalidation. After adding semaphore acquire before invalidation, the semaphore register become read-only if another process try to acquire semaphore. Then it will not be able to release this semaphore. Then it may cause deadlock problem. If this deadlock problem happens, it needs a semaphore firmware fix. 
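Reduced to its essentials, the sequence wrapped around each invalidation looks as follows (illustrative sketch only; the register offsets, the 0x1 "acquired" bit and the timeout handling are taken from the hunks below, error reporting trimmed):

    /* acquire: a read returning 1 means the semaphore is ours */
    for (i = 0; i < adev->usec_timeout; i++) {
            if (RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng) & 0x1)
                    break;
            udelay(1);
    }
    WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);   /* request the flush */
    /* ... poll hub->vm_inv_eng0_ack for the acknowledge ... */
    WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);     /* write 0 == release */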
Signed-off-by: changzhu Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 57 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 57 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.h | 4 +-- 3 files changed, 116 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 27f68d32bfec..321f8a997be8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, const unsigned eng = 17; unsigned int i; + spin_lock(&adev->gmc.invalidate_lock); + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) { + for (i = 0; i < adev->usec_timeout; i++) { + /* a read return value of 1 means semaphore acuqire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); /* @@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, udelay(1); } + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + + spin_unlock(&adev->gmc.invalidate_lock); + if (i < adev->usec_timeout) return; @@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* a read return value of 1 means semaphore acuqire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); @@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, hub->vm_inv_eng0_ack + eng, req, 1 << vmid); + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. 
*/ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + return pd_addr; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 5f4a6cdf83a7..365a88ecdec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -459,6 +459,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } spin_lock(&adev->gmc.invalidate_lock); + + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) { + for (j = 0; j < adev->usec_timeout; j++) { + /* a read return value of 1 means semaphore acuqire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (j >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); /* @@ -474,7 +497,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, break; udelay(1); } + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + spin_unlock(&adev->gmc.invalidate_lock); + if (j < adev->usec_timeout) return; @@ -489,6 +523,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* a read return value of 1 means semaphore acuqire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); @@ -499,6 +547,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, hub->vm_inv_eng0_ack + eng, req, 1 << vmid); + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. 
*/ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + return pd_addr; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 344280b869c4..d0fb7a67c1a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -28,8 +28,8 @@ #include "nbio_v7_0.h" #include "nbio_v7_4.h" -#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4 -#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1 +#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6 +#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3 extern const struct amd_ip_funcs soc15_common_ip_funcs; -- cgit From c8c50a7e5d6c7be119622155bda940fccc8ca569 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Sat, 9 Nov 2019 01:16:05 -0500 Subject: drm/amdkfd: Remove duplicate functions update_mqd_hiq() The functions are the same as update_mqd(). Signed-off-by: Yong Zhao Reviewed-by: Zhan Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 16 ++-------------- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 16 ++-------------- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 4 ---- 3 files changed, 4 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 8d21325b5cbb..7832ec6e480b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -282,18 +282,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } -static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) -{ - struct v10_compute_mqd *m; - - update_mqd(mm, mqd, q); - - /* TODO: what's the point? update_mqd already does this. */ - m = get_mqd(mqd); - m->cp_hqd_vmid = q->vmid; -} - static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -422,7 +410,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v10_compute_mqd); @@ -436,7 +424,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v10_compute_mqd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index df77d67ec9aa..aa9010995eaf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -325,18 +325,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } -static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) -{ - struct v9_mqd *m; - - update_mqd(mm, mqd, q); - - /* TODO: what's the point? update_mqd already does this. 
*/ - m = get_mqd(mqd); - m->cp_hqd_vmid = q->vmid; -} - static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -462,7 +450,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v9_mqd); @@ -475,7 +463,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v9_mqd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 3b6b5671964c..a5e8ff1e5945 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -312,11 +312,7 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { - struct vi_mqd *m; __update_mqd(mm, mqd, q, MTYPE_UC, 0); - - m = get_mqd(mqd); - m->cp_hqd_vmid = q->vmid; } static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, -- cgit From 19efcb9e0684668437c5b2a80b643c7a4dfadca8 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Fri, 22 Nov 2019 11:42:51 +0800 Subject: drm/amd/powerplay: Use ARRAY_SIZE for smu7_profiling Fixes coccicheck warning: drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:4946:28-29: WARNING: Use ARRAY_SIZE Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f754fbd70f68..c3f586613c00 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4943,7 +4943,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7]); - len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); + len = ARRAY_SIZE(smu7_profiling); for (i = 0; i < len; i++) { if (i == hwmgr->power_profile_mode) { -- cgit From e9c5dbc1a287735078b8dd3a0028458d9c09b158 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Fri, 22 Nov 2019 11:42:52 +0800 Subject: drm/amdgpu: Use ARRAY_SIZE for sos_old_versions Fixes coccicheck warning: drivers/gpu/drm/amd/amdgpu/psp_v3_1.c:182:40-41: WARNING: Use ARRAY_SIZE Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index b4d6427aaa79..735c43c7daab 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -179,7 +179,7 @@ static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver) * Double check if the latest four legacy versions. * If yes, it is still the right version. 
*/ - for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) { + for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) { if (sos_old_versions[i] == adev->psp.sos_fw_version) return true; } -- cgit From ee9ea6d88593509f2bbfb8bd40482b21b6a7502c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 19 Nov 2019 15:54:17 -0500 Subject: drm/amd/display: add default clocks if not able to fetch them dm_pp_get_clock_levels_by_type needs to add the default clocks to the powerplay case as well. This was accidently dropped. Fixes: b3ea88fef321de ("drm/amd/powerplay: add get_clock_by_type interface for display") Bug: https://gitlab.freedesktop.org/drm/amd/issues/906 Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 229788bee544..a2e1a73f66b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -342,7 +342,8 @@ bool dm_pp_get_clock_levels_by_type( if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) { if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clks)) { - /* Error in pplib. Provide default values. */ + /* Error in pplib. Provide default values. */ + get_default_clock_levels(clk_type, dc_clks); return true; } } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) { -- cgit From 3d3f9ba8c4cde8cd69378b01979711d57c5feb19 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Fri, 22 Nov 2019 14:15:43 -0600 Subject: drm/amdgpu: Apply noretry setting for mmhub9.4 Config the translation retry behavior according to noretry kernel parameter Signed-off-by: Oak Zeng Suggested-by: Jay Cornwall Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 753eea25b569..8599bfdb9a9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -314,7 +314,8 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) adev->vm_manager.block_size - 9); /* Send no-retry XNACK on fault to suppress VM fault storm. 
*/ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !amdgpu_noretry); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL, hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i, tmp); @@ -905,4 +906,4 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v9_4_query_ras_error_count, -}; \ No newline at end of file +}; -- cgit From 8633f126bf53f23b8de932ea01db65991f21d425 Mon Sep 17 00:00:00 2001 From: John Clements Date: Mon, 25 Nov 2019 18:24:17 +0800 Subject: drm/amdgpu: Resolved offchip EEPROM I/O issue Updated target I2C address Reviewed-by: Hawking Zhang Signed-off-by: John Clements Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 17 ++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 1 + 2 files changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 7de16c0c2f20..2a8e04895595 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -27,7 +27,8 @@ #include #include "smu_v11_0_i2c.h" -#define EEPROM_I2C_TARGET_ADDR 0xA0 +#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8 +#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 /* * The 2 macros bellow represent the actual size in bytes that @@ -83,7 +84,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, { int ret = 0; struct i2c_msg msg = { - .addr = EEPROM_I2C_TARGET_ADDR, + .addr = 0, .flags = 0, .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE, .buf = buff, @@ -93,6 +94,8 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, *(uint16_t *)buff = EEPROM_HDR_START; __encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE); + msg.addr = control->i2c_address; + ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); if (ret < 1) DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret); @@ -203,7 +206,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; struct i2c_msg msg = { - .addr = EEPROM_I2C_TARGET_ADDR, + .addr = 0, .flags = I2C_M_RD, .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE, .buf = buff, @@ -213,10 +216,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) switch (adev->asic_type) { case CHIP_VEGA20: + control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20; ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); break; case CHIP_ARCTURUS: + control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS; ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); break; @@ -229,6 +234,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) return ret; } + msg.addr = control->i2c_address; + /* Read/Create table header from EEPROM address 0 */ ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); if (ret < 1) { @@ -408,8 +415,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, * Update bits 16,17 of EEPROM address in I2C address by setting them * to bits 1,2 of Device address byte */ - msg->addr = EEPROM_I2C_TARGET_ADDR | - ((control->next_addr & 
EEPROM_ADDR_MSB_MASK) >> 15); + msg->addr = control->i2c_address | + ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15); msg->flags = write ? 0 : I2C_M_RD; msg->len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE; msg->buf = buff; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 622269957c1b..ca78f812d436 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -50,6 +50,7 @@ struct amdgpu_ras_eeprom_control { struct mutex tbl_mutex; bool bus_locked; uint32_t tbl_byte_sum; + uint16_t i2c_address; // 8-bit represented address }; /* -- cgit From 4c50d6f64958a17cca3d7dedcdf4fad54804655d Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Thu, 21 Nov 2019 21:29:30 +0800 Subject: drm/amd: Fix Kconfig indentation Adjust indentation from spaces to tab (+optional two spaces) as in coding style with command like: $ sed -e 's/^ /\t/' -i */Kconfig Reviewed-by: Alex Deucher Signed-off-by: Krzysztof Kozlowski Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/acp/Kconfig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index d968c2471412..19bae9100da4 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -2,11 +2,11 @@ menu "ACP (Audio CoProcessor) Configuration" config DRM_AMD_ACP - bool "Enable AMD Audio CoProcessor IP support" - depends on DRM_AMDGPU - select MFD_CORE - select PM_GENERIC_DOMAINS if PM - help + bool "Enable AMD Audio CoProcessor IP support" + depends on DRM_AMDGPU + select MFD_CORE + select PM_GENERIC_DOMAINS if PM + help Choose this option to enable ACP IP support for AMD SOCs. This adds the ACP (Audio CoProcessor) IP driver and wires it up into the amdgpu driver. The ACP block provides the DMA -- cgit From dde8c8dfd0b7dd61ada1fc1b8560dd974140ba3f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 22 Nov 2019 23:04:07 +0000 Subject: drm/amd/powerplay: remove redundant assignment to variables HiSidd and LoSidd The variables HiSidd and LoSidd are being initialized with values that are never read and are being updated a little later with a new value. The initialization is redundant and can be removed. 
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 15590fd86ef4..868e2d5f6e62 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -653,8 +653,8 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr) static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); - uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + uint16_t HiSidd; + uint16_t LoSidd; struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); -- cgit From 07863a6cc2936f8cc4b5bfb506dec48531cc4948 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sat, 23 Nov 2019 12:36:39 -0700 Subject: drm/amd/display: Use NULL for pointer assignment in copy_stream_update_to_stream Clang warns: ../drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:1965:26: warning: expression which evaluates to zero treated as a null pointer constant of type 'struct dc_dsc_config *' [-Wnon-literal-null-conversion] update->dsc_config = false; ^~~~~ ../drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:1971:25: warning: expression which evaluates to zero treated as a null pointer constant of type 'struct dc_dsc_config *' [-Wnon-literal-null-conversion] update->dsc_config = false; ^~~~~ 2 warnings generated. 
Fixes: f6fe4053b91f ("drm/amd/display: Use a temporary copy of the current state when updating DSC config") Link: https://github.com/ClangBuiltLinux/linux/issues/777 Reviewed-by: Harry Wentland Signed-off-by: Nathan Chancellor Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c7db4f4810c6..2645d20e8c4c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1962,13 +1962,13 @@ static void copy_stream_update_to_stream(struct dc *dc, if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { stream->timing.dsc_cfg = old_dsc_cfg; stream->timing.flags.DSC = old_dsc_enabled; - update->dsc_config = false; + update->dsc_config = NULL; } dc_release_state(dsc_validate_context); } else { DC_ERROR("Failed to allocate new validate context for DSC change\n"); - update->dsc_config = false; + update->dsc_config = NULL; } } } -- cgit From a63141e31764f8daf3f29e8e2d450dcf9199d1c8 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sat, 23 Nov 2019 12:23:36 -0700 Subject: drm/amdgpu: Ensure ret is always initialized when using SOC15_WAIT_ON_RREG Commit b0f3cd3191cd ("drm/amdgpu: remove unnecessary JPEG2.0 code from VCN2.0") introduced a new clang warning in the vcn_v2_0_stop function: ../drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c:1082:2: warning: variable 'r' is used uninitialized whenever 'while' loop exits because its condition is false [-Wsometimes-uninitialized] SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ../drivers/gpu/drm/amd/amdgpu/../amdgpu/soc15_common.h:55:10: note: expanded from macro 'SOC15_WAIT_ON_RREG' while ((tmp_ & (mask)) != (expected_value)) { \ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ../drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c:1083:6: note: uninitialized use occurs here if (r) ^ ../drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c:1082:2: note: remove the condition if it is always true SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); ^ ../drivers/gpu/drm/amd/amdgpu/../amdgpu/soc15_common.h:55:10: note: expanded from macro 'SOC15_WAIT_ON_RREG' while ((tmp_ & (mask)) != (expected_value)) { \ ^ ../drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c:1072:7: note: initialize the variable 'r' to silence this warning int r; ^ = 0 1 warning generated. To prevent warnings like this from happening in the future, make the SOC15_WAIT_ON_RREG macro initialize its ret variable before the while loop that can time out. This macro's return value is always checked so it should set ret in both the success and fail path. 
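The shape of the fix, reduced (a sketch, not the literal macro; the retry bookkeeping is simplified and the error value shown is illustrative):

    ret = 0;                               /* success unless the loop times out */
    while ((tmp_ & (mask)) != (expected_value)) {
            if (--loop == 0) {
                    ret = -ETIMEDOUT;      /* only the timeout path overwrites it */
                    break;
            }
            udelay(1);
            tmp_ = RREG32(reg);
    }

If the condition already holds on entry the loop body never runs, and callers such as vcn_v2_0_stop() now see 0 instead of an uninitialized value.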
Link: https://github.com/ClangBuiltLinux/linux/issues/776 Signed-off-by: Nathan Chancellor Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 839f186e1182..19e870c79896 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -52,6 +52,7 @@ uint32_t old_ = 0; \ uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ uint32_t loop = adev->usec_timeout; \ + ret = 0; \ while ((tmp_ & (mask)) != (expected_value)) { \ if (old_ != tmp_) { \ loop = adev->usec_timeout; \ -- cgit From 70ebe8a48216ae2fda862cae47ff0b8af5b279b9 Mon Sep 17 00:00:00 2001 From: Timothy Pearson Date: Sun, 24 Nov 2019 13:15:16 -0600 Subject: amdgpu: Enable KFD on POWER systems KFD has been verified to function on POWER systems (Talos II / Vega 64). It should be available as a kernel configuration option on these systems. Signed-off-by: Timothy Pearson Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index a1a35d4d594b..ba0e68057a89 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -5,7 +5,7 @@ config HSA_AMD bool "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && (X86_64 || ARM64) + depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64) imply AMD_IOMMU_V2 if X86_64 select MMU_NOTIFIER help -- cgit From ef2cb90a906a8537954ebe751e93ed3dbfae93c9 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 25 Nov 2019 22:54:45 +0800 Subject: drm/amd/display: remove set but not used variable 'msg_out' drivers/gpu/drm/amd/amdgpu/../display/modules/hdcp/hdcp_psp.c: In function mod_hdcp_hdcp2_enable_encryption: drivers/gpu/drm/amd/amdgpu/../display/modules/hdcp/hdcp_psp.c:633:77: warning: variable msg_out set but not used [-Wunused-but-set-variable] drivers/gpu/drm/amd/amdgpu/../display/modules/hdcp/hdcp_psp.c: In function mod_hdcp_hdcp2_enable_dp_stream_encryption: drivers/gpu/drm/amd/amdgpu/../display/modules/hdcp/hdcp_psp.c:710:77: warning: variable msg_out set but not used [-Wunused-but-set-variable] It is never used, so remove it. 
Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 2dd5feec8e6c..468f5e6c3487 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -630,14 +630,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; - struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; struct mod_hdcp_display *display = get_first_added_display(hdcp); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; - msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; hdcp2_message_init(hdcp, msg_in); @@ -707,14 +705,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; - struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; uint8_t i; hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; - msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; hdcp2_message_init(hdcp, msg_in); -- cgit From 58e8c603b8fe4b52bf7236a5a253ef61350a19fe Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 25 Nov 2019 22:58:43 +0800 Subject: drm/amd/powerplay: remove set but not used variable 'stretch_amount2' drivers/gpu/drm/amd/amdgpu/../powerplay/smumgr/vegam_smumgr.c: In function vegam_populate_clock_stretcher_data_table: drivers/gpu/drm/amd/amdgpu/../powerplay/smumgr/vegam_smumgr.c:1489:29: warning: variable stretch_amount2 set but not used [-Wunused-but-set-variable] It is never used, so can be removed. Signed-off-by: YueHaibing Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 50896e9b2579..b0e0d67cd54b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -1486,7 +1486,7 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); - uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + uint8_t i, stretch_amount, volt_offset = 0; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1525,11 +1525,9 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? 
table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5; /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { + if (!(stretch_amount == 1 || stretch_amount == 2 || + stretch_amount == 5 || stretch_amount == 3 || + stretch_amount == 4)) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher); PP_ASSERT_WITH_CODE(false, -- cgit From 79c4ac0d759b6baaca22d7f37f6594a677030976 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Mon, 25 Nov 2019 10:34:13 -0500 Subject: drm/amd/display: Null check aconnector in event_property_validate [Why] previously event_property_validate was only called after we enabled the display. But after "Refactor HDCP to handle multiple displays per link" this function can be called at any time. In certain cases we don't have a aconnector [How] Null check aconnector and exit early. This is ok because we only need to check the ENABLED->DESIRED transition if a connector exists. Fixes: b1abe5586ffc ("drm/amd/display: Refactor HDCP to handle multiple displays per link") Signed-off-by: Bhawanpreet Lakha Reviewed-by: Zhan Liu Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index f6864a51891a..ae329335dfcc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -225,6 +225,9 @@ static void event_property_validate(struct work_struct *work) struct mod_hdcp_display_query query; struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + if (!aconnector) + return; + mutex_lock(&hdcp_work->mutex); query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; -- cgit From b72ff1909cf3267e48b905e5bdd6e7e41d0a8593 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Mon, 25 Nov 2019 16:25:35 -0500 Subject: drm/amdgpu: Raise KFD unpinned system memory limit Allow KFD applications to use more unpinned system memory through HMM. Signed-off-by: Felix Kuehling Reviewed-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index a1ed8a8e3752..c2f92a7b56e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -85,7 +85,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, } /* Set memory usage limits. 
Current, limits are - * System (TTM + userptr) memory - 3/4th System RAM + * System (TTM + userptr) memory - 15/16th System RAM * TTM memory - 3/8th System RAM */ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) @@ -98,7 +98,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) mem *= si.mem_unit; spin_lock_init(&kfd_mem_limit.mem_limit_lock); - kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2); + kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4); kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", (kfd_mem_limit.max_system_mem_limit >> 20), -- cgit From 9f890f3044c79d18bce1f3a3f75037b93e71b880 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Mon, 15 Jul 2019 16:18:03 -0400 Subject: drm/amdgpu: Optimize KFD page table reservation Be less pessimistic about estimated page table use for KFD. Most allocations use 2MB pages and therefore need less VRAM for page tables. This allows more VRAM to be used for applications especially on large systems with many GPUs and hundreds of GB of system memory. Example: 8 GPUs with 32GB VRAM each + 256GB system memory = 512GB Old page table reservation per GPU: 1GB New page table reservation per GPU: 32MB Signed-off-by: Felix Kuehling Reviewed-by: xinhui pan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index c2f92a7b56e4..b6d1958d514f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -105,11 +105,24 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) (kfd_mem_limit.max_ttm_mem_limit >> 20)); } +/* Estimate page table size needed to represent a given memory size + * + * With 4KB pages, we need one 8 byte PTE for each 4KB of memory + * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB + * of memory (factor 256K, >> 18). ROCm user mode tries to optimize + * for 2MB pages for TLB efficiency. However, small allocations and + * fragmented system memory still need some 4KB pages. We choose a + * compromise that should work in most cases without reserving too + * much memory for page tables unnecessarily (factor 16K, >> 14). + */ +#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14) + static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 domain, bool sg) { + uint64_t reserved_for_pt = + ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed; - uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9; int ret = 0; acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, -- cgit From e416fdb6a36e873434ac859f73fed487250cda91 Mon Sep 17 00:00:00 2001 From: Jack Zhang Date: Tue, 26 Nov 2019 14:47:29 +0800 Subject: drm/amd/amdgpu/sriov skip jpeg ip block for ARCTURUS VF Currently ARCTURUS VF doesn't support jpeg ip block. Skip jpeg ip block in case guest driver load fail. 
Signed-off-by: Jack Zhang Reviewed-by: Zhexi Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 951327f759fa..48649f55f3ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -834,7 +834,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); break; case CHIP_RENOIR: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); -- cgit From be3e73ea7def13abbb481844aaaa073903e18b03 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 20 Nov 2019 19:21:35 +0800 Subject: drm/amdgpu: apply gpr/gds workaround before enabling GFX EDC mode gfx memory should be initialized before enabling DED and FUE field in mmGB_EDC_MODE Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f6e39b332762..cc8dab5f5b55 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4235,10 +4235,6 @@ static int gfx_v9_0_ecc_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_gfx_ras_late_init(adev); - if (r) - return r; - r = gfx_v9_0_do_edc_gds_workarounds(adev); if (r) return r; @@ -4248,6 +4244,10 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; + r = amdgpu_gfx_ras_late_init(adev); + if (r) + return r; + return 0; } -- cgit From adc8139e687ed58548a92cda541d10c57212cd38 Mon Sep 17 00:00:00 2001 From: Zhan liu Date: Mon, 25 Nov 2019 17:25:18 -0500 Subject: drm/amd/display: Modify comments to match the code [Why] This line of code was modified. However, comments remained unchanged. As a result, comments and code are mismatching. [How] Modifying comments to reflect code. At the same time, explaining why the value was changed from 200ms to 3000ms. Signed-off-by: Zhan Liu Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_helper.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 8da4576e2908..e41befa067ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -485,7 +485,12 @@ void generic_reg_wait(const struct dc_context *ctx, return; } - /* something is terribly wrong if time out is > 200ms. (5Hz) */ + /* + * Something is terribly wrong if time out is > 3000ms. + * 3000ms is the maximum time needed for SMU to pass values back. + * This value comes from experiments. 
+ * + */ ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000); for (i = 0; i <= time_out_num_tries; i++) { -- cgit From de185019bcb9d824d3dd5a80746571e83644b636 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 20 Nov 2019 17:31:11 -0500 Subject: drm/amdgpu: move pci handling out of pm ops The documentation says the that PCI core handles this for you unless you choose to implement it. Just rely on the PCI core to handle the pci specific bits. Reviewed-by: Zhan Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++----------------- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16 +++++++-------- 3 files changed, 24 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index dd961f0a5a75..145a2efd560a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1185,8 +1185,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); int amdgpu_device_ip_suspend(struct amdgpu_device *adev); -int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); -int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); +int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); +int amdgpu_device_resume(struct drm_device *dev, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d472526d97b9..aae8a29f48ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1087,6 +1087,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); + int r; if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF) return; @@ -1096,7 +1097,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_device_resume(dev, true, true); + pci_set_power_state(dev->pdev, PCI_D0); + pci_restore_state(dev->pdev); + r = pci_enable_device(dev->pdev); + if (r) + DRM_WARN("pci_enable_device failed (%d)\n", r); + amdgpu_device_resume(dev, true); dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); @@ -1104,7 +1110,11 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero pr_info("amdgpu: switched off\n"); drm_kms_helper_poll_disable(dev); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_device_suspend(dev, true, true); + amdgpu_device_suspend(dev, true); + pci_save_state(dev->pdev); + /* Shut down the device */ + pci_disable_device(dev->pdev); + pci_set_power_state(dev->pdev, PCI_D3cold); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -3195,7 +3205,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) * Returns 0 for success or an error on failure. * Called at driver suspend. 
*/ -int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) +int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) { struct amdgpu_device *adev; struct drm_crtc *crtc; @@ -3278,13 +3288,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - if (suspend) { - pci_save_state(dev->pdev); - /* Shut down the device */ - pci_disable_device(dev->pdev); - pci_set_power_state(dev->pdev, PCI_D3hot); - } - return 0; } @@ -3299,7 +3302,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) * Returns 0 for success or an error on failure. * Called at driver resume. */ -int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool fbcon) { struct drm_connector *connector; struct drm_connector_list_iter iter; @@ -3310,14 +3313,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (resume) { - pci_set_power_state(dev->pdev, PCI_D0); - pci_restore_state(dev->pdev); - r = pci_enable_device(dev->pdev); - if (r) - return r; - } - /* post card */ if (amdgpu_device_need_post(adev)) { r = amdgpu_atom_asic_init(adev->mode_info.atom_context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 459bca6514e6..3f6f14ce1511 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1147,7 +1147,7 @@ static int amdgpu_pmops_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_suspend(drm_dev, true, true); + return amdgpu_device_suspend(drm_dev, true); } static int amdgpu_pmops_resume(struct device *dev) @@ -1162,7 +1162,7 @@ static int amdgpu_pmops_resume(struct device *dev) pm_runtime_enable(dev); } - return amdgpu_device_resume(drm_dev, true, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_freeze(struct device *dev) @@ -1171,7 +1171,7 @@ static int amdgpu_pmops_freeze(struct device *dev) struct amdgpu_device *adev = drm_dev->dev_private; int r; - r = amdgpu_device_suspend(drm_dev, false, true); + r = amdgpu_device_suspend(drm_dev, true); if (r) return r; return amdgpu_asic_reset(adev); @@ -1181,21 +1181,21 @@ static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_resume(drm_dev, false, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_suspend(drm_dev, true, true); + return amdgpu_device_suspend(drm_dev, true); } static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_resume(drm_dev, false, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_runtime_suspend(struct device *dev) @@ -1214,7 +1214,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); - ret = amdgpu_device_suspend(drm_dev, false, false); + ret = amdgpu_device_suspend(drm_dev, false); if (amdgpu_device_supports_boco(drm_dev)) { /* Only need to handle PCI state in the driver for ATPX * PCI core handles it for _PR3. 
@@ -1264,7 +1264,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) } else if (amdgpu_device_supports_baco(drm_dev)) { amdgpu_device_baco_exit(drm_dev); } - ret = amdgpu_device_resume(drm_dev, false, false); + ret = amdgpu_device_resume(drm_dev, false); drm_kms_helper_poll_enable(drm_dev); if (amdgpu_device_supports_boco(drm_dev)) drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; -- cgit From 5149f08275de88ed6aade660e6ec5620962fa7a6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 25 Nov 2019 11:11:18 -0500 Subject: drm/amdgpu: flag vram lost on baco reset for VI/CIK MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VI/CIK BACO was inflight when this fix landed for SOC15/NV. Add the fix to VI/CIK as well. Acked-by: Evan Quan Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/vi.c | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 968bc706b94d..1dfe4a1337cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1363,10 +1363,13 @@ static int cik_asic_reset(struct amdgpu_device *adev) { int r; - if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) + if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + if (!adev->in_suspend) + amdgpu_inc_vram_lost(adev); r = smu7_asic_baco_reset(adev); - else + } else { r = cik_asic_pci_config_reset(adev); + } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 871c0b8c6b0b..e4f4201b3c34 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -804,10 +804,13 @@ static int vi_asic_reset(struct amdgpu_device *adev) { int r; - if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) + if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + if (!adev->in_suspend) + amdgpu_inc_vram_lost(adev); r = smu7_asic_baco_reset(adev); - else + } else { r = vi_asic_pci_config_reset(adev); + } return r; } -- cgit From f4618fe9c26d749613d53ee9d661b6baf9af3b12 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 26 Nov 2019 15:10:29 +0300 Subject: drm/amdgpu: Fix a bug in jpeg_v1_0_start() Originally the last WREG32_SOC15() was a part of the if statement block but the curly braces are on the wrong line. 
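For illustration only, a minimal stand-alone sketch of this bug class (hypothetical write_reg() helper and register names, not the actual jpeg_v1_0_start()/WREG32_SOC15() code): a closing brace placed before the final register write leaves that write unconditional.

#include <stdio.h>

static void write_reg(const char *reg, unsigned int val)
{
	printf("write %s = 0x%x\n", reg, val);
}

/* Buggy form: the brace closes too early, so the RB_CNTL write runs
 * even when the condition is false. */
static void start_buggy(int decode_mode)
{
	if (decode_mode) {
		write_reg("RB_RPTR", 0);
		write_reg("RB_WPTR", 0);
	}	write_reg("RB_CNTL", 1);
}

/* Fixed form: the write is moved inside the if block, matching the
 * original intent. */
static void start_fixed(int decode_mode)
{
	if (decode_mode) {
		write_reg("RB_RPTR", 0);
		write_reg("RB_WPTR", 0);
		write_reg("RB_CNTL", 1);
	}
}

int main(void)
{
	start_buggy(0);	/* still prints the RB_CNTL write */
	start_fixed(0);	/* prints nothing */
	return 0;
}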
Fixes: bb0db70f3f75 ("drm/amdgpu: separate JPEG1.0 code out from VCN1.0") Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 553506df077d..a141408dfb23 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -522,7 +522,8 @@ void jpeg_v1_0_start(struct amdgpu_device *adev, int mode) WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0); WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0); - } WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + } /* initialize wptr */ ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); -- cgit From a5e82d0b95051a01c5e8473f6969eb69a38b40cf Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 6 Nov 2019 21:10:20 +0800 Subject: drm/amdgpu/gfx10: unlock srbm_mutex after queue programming finish srbm_mutex is to guarantee atomicity for r/w of gfx indexed registers Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index ca5f0e7ea1ac..208fb9cd1482 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -2825,7 +2825,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) /* Init gfx ring 0 for pipe 0 */ mutex_lock(&adev->srbm_mutex); gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0); - mutex_unlock(&adev->srbm_mutex); + /* Set ring buffer size */ ring = &adev->gfx.gfx_ring[0]; rb_bufsz = order_base_2(ring->ring_size / 8); @@ -2863,11 +2863,11 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1); gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + mutex_unlock(&adev->srbm_mutex); /* Init gfx ring 1 for pipe 1 */ mutex_lock(&adev->srbm_mutex); gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1); - mutex_unlock(&adev->srbm_mutex); ring = &adev->gfx.gfx_ring[1]; rb_bufsz = order_base_2(ring->ring_size / 8); tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); @@ -2897,6 +2897,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1); gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + mutex_unlock(&adev->srbm_mutex); /* Switch to pipe 0 */ mutex_lock(&adev->srbm_mutex); -- cgit From 858054f7617bde2df8969e999f580ff37faacb70 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 6 Nov 2019 21:08:06 +0800 Subject: drm/amdgpu/gfx10: remove outdated comments Signed-off-by: Xiaojie Yuan Reviewed-by: Zhan Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 208fb9cd1482..82ebc17c9037 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -50,9 +50,6 @@ * Navi10 has two graphic rings to share each graphic pipe. * 1. Primary ring * 2. 
Async ring - * - * In bring-up phase, it just used primary ring so set gfx ring number as 1 at - * first. */ #define GFX10_NUM_GFX_RINGS 2 #define GFX10_MEC_HPD_SIZE 2048 -- cgit From 2255d7f36e479646244cd87906a6b87eae73711d Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 26 Nov 2019 14:23:10 -0500 Subject: drm/amdgpu/gfx: Clear more EDC cnt Clear SDMA and HDP EDC counter in GPR workarounds. Signed-off-by: James Zhu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index cc8dab5f5b55..c1bffd906fb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -48,6 +48,8 @@ #include "amdgpu_ras.h" +#include "sdma0/sdma0_4_0_offset.h" +#include "sdma1/sdma1_4_0_offset.h" #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L @@ -4029,6 +4031,9 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6}, + { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), 0, 1, 1}, + { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_EDC_COUNTER), 0, 1, 1}, + { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, }; static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) -- cgit From 45317d5ffb4d9bc842ef1f6119bc0b9e36c95f56 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 26 Nov 2019 14:27:46 -0500 Subject: drm/amdgpu/gfx: Increase dispatch packet number For Arcturus, increase dispatch packet number to stress scheduler. 
Signed-off-by: James Zhu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c1bffd906fb4..047453c16a6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4146,7 +4146,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 128; /* x */ + ib.ptr[ib.length_dw++] = 256; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4174,7 +4174,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 128; /* x */ + ib.ptr[ib.length_dw++] = 256; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = -- cgit From 869aebc7baa5b9bc65bde29690de311a26b2681b Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 26 Nov 2019 19:40:08 +0800 Subject: drm/amdgpu: use CPU to flush vmhub if sched stopped MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise the flush_gpu_tlb will hang if we unload the KMD because the schedulers already stopped Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 321f8a997be8..232469507446 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -326,7 +326,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || - adev->in_gpu_reset) { + adev->in_gpu_reset || + ring->sched.ready == false) { gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); mutex_unlock(&adev->mman.gtt_window_lock); return; -- cgit From 6bb27085aaa973eac31058a2534d73efe50b3a00 Mon Sep 17 00:00:00 2001 From: Zhan Liu Date: Thu, 28 Nov 2019 14:12:11 -0500 Subject: drm/amd/display: Include num_vmid and num_dsc within NV14's resource caps [Why] "num_vmid" and "num_dsc" are missing within NV14's resource caps structure. [How] Add the missing parts. 
Signed-off-by: Zhan Liu Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 328d10f6fbfe..da7a92fc0909 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -847,6 +847,8 @@ static const struct resource_caps res_cap_nv14 = { .num_pll = 5, .num_dwb = 1, .num_ddc = 5, + .num_vmid = 16, + .num_dsc = 5, }; static const struct dc_debug_options debug_defaults_drv = { -- cgit From 5bbbc9b77831427609e4fcfb0a2b5961af3a1156 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 28 Nov 2019 11:30:10 -0500 Subject: drm/amd/display: Drop AMD_EDID_UTILITY defines We don't use this upstream in the Linux kernel. Signed-off-by: Harry Wentland Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dsc.h | 2 -- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 8 -------- drivers/gpu/drm/amd/display/dc/dc_types.h | 4 ---- 3 files changed, 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index a782ae18a1c5..cc9915e545cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -41,10 +41,8 @@ struct dc_dsc_bw_range { struct display_stream_compressor { const struct dsc_funcs *funcs; -#ifndef AMD_EDID_UTILITY struct dc_context *ctx; int inst; -#endif }; bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 86043d431d40..25c50bcab9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -26,8 +26,6 @@ #ifndef DC_HW_TYPES_H #define DC_HW_TYPES_H -#ifndef AMD_EDID_UTILITY - #include "os_types.h" #include "fixed31_32.h" #include "signal_types.h" @@ -584,8 +582,6 @@ struct scaling_taps { bool integer_scaling; }; -#endif /* AMD_EDID_UTILITY */ - enum dc_timing_standard { DC_TIMING_STANDARD_UNDEFINED, DC_TIMING_STANDARD_DMT, @@ -742,8 +738,6 @@ struct dc_crtc_timing { struct dc_dsc_config dsc_cfg; }; -#ifndef AMD_EDID_UTILITY - enum trigger_delay { TRIGGER_DELAY_NEXT_PIXEL = 0, TRIGGER_DELAY_NEXT_LINE, @@ -837,7 +831,5 @@ struct tg_color { uint16_t color_b_cb; }; -#endif /* AMD_EDID_UTILITY */ - #endif /* DC_HW_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 1363e8907fbf..2b92bfa28bde 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,7 +25,6 @@ #ifndef DC_TYPES_H_ #define DC_TYPES_H_ -#ifndef AMD_EDID_UTILITY /* AND EdidUtility only needs a portion * of this file, including the rest only * causes additional issues. 
@@ -781,9 +780,6 @@ struct dc_clock_config { uint32_t current_clock_khz;/*current clock in use*/ }; -#endif /*AMD_EDID_UTILITY*/ -//AMD EDID UTILITY does not need any of the above structures - /* DSC DPCD capabilities */ union dsc_slice_caps1 { struct { -- cgit From 82a829dc8c2bb03cc9b7e5beb1c5479aa3ba7831 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 26 Nov 2019 19:42:25 +0800 Subject: drm/amdgpu: fix calltrace during kmd unload(v3) issue: kernel would report a warning from a double unpin during the driver unloading on the CSB bo why: we unpin it during hw_fini, and there will be another unpin in sw_fini on CSB bo. fix: actually we don't need to pin/unpin it during hw_init/fini since it is created with kernel pinned, we only need to fullfill the CSB again during hw_init to prevent CSB/VRAM lost after S3 v2: get_csb in init_rlc so hw_init() will make CSIB content back even after reset or s3 v3: use bo_create_kernel instead of bo_create_reserved for CSB otherwise the bo_free_kernel() on CSB is not aligned and would lead to its internal reserve pending there forever take care of gfx7/8 as well Signed-off-by: Monk Liu Reviewed-by: Hawking Zhang Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 10 +----- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 58 +-------------------------------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 40 +---------------------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 40 +---------------------- 5 files changed, 6 insertions(+), 144 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index c8793e6cc3c5..6373bfb47d55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) */ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) { - volatile u32 *dst_ptr; u32 dws; int r; /* allocate clear state block */ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) return r; } - /* set up the cs buffer */ - dst_ptr = adev->gfx.rlc.cs_ptr; - adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 82ebc17c9037..e200ba5659d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -990,39 +990,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; 
- - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v10_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -1784,25 +1751,7 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static int gfx_v10_0_init_csb(struct amdgpu_device *adev) { - int r; - - if (adev->in_gpu_reset) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (r) - return r; - - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, - (void **)&adev->gfx.rlc.cs_ptr); - if (!r) { - adev->gfx.rlc.funcs->get_csb_buffer(adev, - adev->gfx.rlc.cs_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - } - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - if (r) - return r; - } + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, @@ -3773,10 +3722,6 @@ static int gfx_v10_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gfx_v10_0_csb_vram_pin(adev); - if (r) - return r; - if (!amdgpu_emu_mode) gfx_v10_0_init_golden_registers(adev); @@ -3864,7 +3809,6 @@ static int gfx_v10_0_hw_fini(void *handle) } gfx_v10_0_cp_enable(adev, false); gfx_v10_0_enable_gui_idle_interrupt(adev, false); - gfx_v10_0_csb_vram_unpin(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 58b7ef97bff5..8f20a5dd44fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4553,6 +4553,8 @@ static int gfx_v7_0_hw_init(void *handle) gfx_v7_0_constants_init(adev); + /* init CSB */ + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* init rlc */ r = adev->gfx.rlc.funcs->resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 14e774d52727..9b0b22522e83 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -3916,6 +3883,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v8_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32(mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -4836,10 +4804,6 @@ static int gfx_v8_0_hw_init(void 
*handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); - r = gfx_v8_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4957,8 +4921,6 @@ static int gfx_v8_0_hw_fini(void *handle) pr_err("rlc is busy, skip halt rlc\n"); amdgpu_gfx_rlc_exit_safe_mode(adev); - gfx_v8_0_csb_vram_unpin(adev); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 047453c16a6b..1743fe281354 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1685,39 +1685,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -2405,6 +2372,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v9_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI), adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -3696,10 +3664,6 @@ static int gfx_v9_0_hw_init(void *handle) gfx_v9_0_constants_init(adev); - r = gfx_v9_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3781,8 +3745,6 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); adev->gfx.rlc.funcs->stop(adev); - gfx_v9_0_csb_vram_unpin(adev); - return 0; } -- cgit From 1797ec7ffd1b4d41b078a4e807225827caf39e60 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 26 Nov 2019 19:36:29 +0800 Subject: drm/amdgpu: skip rlc ucode loading for SRIOV gfx10 Signed-off-by: Monk Liu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 80 +++++++++++++++++----------------- 1 file changed, 41 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index e200ba5659d1..60c95633f3ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -687,59 +687,61 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); - rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; - version_major = le16_to_cpu(rlc_hdr->header.header_version_major); - 
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); - if (version_major == 2 && version_minor == 1) - adev->gfx.rlc.is_rlc_v2_1 = true; - - adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); - adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); - adev->gfx.rlc.save_and_restore_offset = + if (!amdgpu_sriov_vf(adev)) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); + err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + version_major = le16_to_cpu(rlc_hdr->header.header_version_major); + version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); + if (version_major == 2 && version_minor == 1) + adev->gfx.rlc.is_rlc_v2_1 = true; + + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); + adev->gfx.rlc.save_and_restore_offset = le32_to_cpu(rlc_hdr->save_and_restore_offset); - adev->gfx.rlc.clear_state_descriptor_offset = + adev->gfx.rlc.clear_state_descriptor_offset = le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); - adev->gfx.rlc.avail_scratch_ram_locations = + adev->gfx.rlc.avail_scratch_ram_locations = le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); - adev->gfx.rlc.reg_restore_list_size = + adev->gfx.rlc.reg_restore_list_size = le32_to_cpu(rlc_hdr->reg_restore_list_size); - adev->gfx.rlc.reg_list_format_start = + adev->gfx.rlc.reg_list_format_start = le32_to_cpu(rlc_hdr->reg_list_format_start); - adev->gfx.rlc.reg_list_format_separate_start = + adev->gfx.rlc.reg_list_format_separate_start = le32_to_cpu(rlc_hdr->reg_list_format_separate_start); - adev->gfx.rlc.starting_offsets_start = + adev->gfx.rlc.starting_offsets_start = le32_to_cpu(rlc_hdr->starting_offsets_start); - adev->gfx.rlc.reg_list_format_size_bytes = + adev->gfx.rlc.reg_list_format_size_bytes = le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); - adev->gfx.rlc.reg_list_size_bytes = + adev->gfx.rlc.reg_list_size_bytes = le32_to_cpu(rlc_hdr->reg_list_size_bytes); - adev->gfx.rlc.register_list_format = + adev->gfx.rlc.register_list_format = kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + - adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); - if (!adev->gfx.rlc.register_list_format) { - err = -ENOMEM; - goto out; - } + adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); + if (!adev->gfx.rlc.register_list_format) { + err = -ENOMEM; + goto out; + } - tmp = (unsigned int *)((uintptr_t)rlc_hdr + - le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) - adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) + adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); - adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; + adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; - tmp = (unsigned int *)((uintptr_t)rlc_hdr + - le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) - adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); + for (i = 0 ; 
i < (rlc_hdr->reg_list_size_bytes >> 2); i++) + adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); - if (adev->gfx.rlc.is_rlc_v2_1) - gfx_v10_0_init_rlc_ext_microcode(adev); + if (adev->gfx.rlc.is_rlc_v2_1) + gfx_v10_0_init_rlc_ext_microcode(adev); + } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); -- cgit From 6de40f02b355ae4c449acdaae19bac1ee380a01d Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 26 Nov 2019 19:38:22 +0800 Subject: drm/amdgpu: do autoload right after MEC loaded for SRIOV VF since we don't have RLCG ucode loading and no SRlist as well Signed-off-by: Monk Liu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 96a6b00eeede..525a8dd43f1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1490,8 +1490,8 @@ out: return ret; /* Start rlc autoload after psp recieved all the gfx firmware */ - if (psp->autoload_supported && ucode->ucode_id == - AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { + if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? + AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); -- cgit From eb529b8e46b87c1f89deeb1e736cf704948326e3 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 29 Nov 2019 16:20:51 +0800 Subject: drm/amdgpu: should stop GFX ring in hw_fini To align with the scheme from gfx9 disabling GFX ring after VM shutdown could avoid garbage data be fetched to GFX RB which may lead to unnecessary screw up on GFX Signed-off-by: Monk Liu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 60c95633f3ce..675ce8f1cab7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3806,7 +3806,7 @@ static int gfx_v10_0_hw_fini(void *handle) if (amdgpu_gfx_disable_kcq(adev)) DRM_ERROR("KCQ disable failed\n"); if (amdgpu_sriov_vf(adev)) { - pr_debug("For SRIOV client, shouldn't do anything.\n"); + gfx_v10_0_cp_gfx_enable(adev, false); return 0; } gfx_v10_0_cp_enable(adev, false); -- cgit From d5939e4db5e6b8e3fe9707edb08d705bf4754cea Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 26 Nov 2019 19:33:38 +0800 Subject: drm/amdgpu: fix GFX10 missing CSIB set(v3) still need to init csb even for SRIOV v2: drop init_pg() for gfx10 at all since PG and GFX off feature will be fully controled by RLC and SMU fw for gfx10 v3: drop the flush_gpu_tlb lines since we consider it is only usefull in emulation Signed-off-by: Monk Liu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 33 +++++---------------------------- 1 file changed, 5 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 675ce8f1cab7..5744b212db86 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1765,22 +1765,6 @@ static int 
gfx_v10_0_init_csb(struct amdgpu_device *adev) return 0; } -static int gfx_v10_0_init_pg(struct amdgpu_device *adev) -{ - int i; - int r; - - r = gfx_v10_0_init_csb(adev); - if (r) - return r; - - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); - - /* TODO: init power gating */ - return 0; -} - void gfx_v10_0_rlc_stop(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL); @@ -1873,21 +1857,16 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) { int r; - if (amdgpu_sriov_vf(adev)) - return 0; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - r = gfx_v10_0_wait_for_rlc_autoload_complete(adev); - if (r) - return r; - r = gfx_v10_0_init_pg(adev); + r = gfx_v10_0_wait_for_rlc_autoload_complete(adev); if (r) return r; - /* enable RLC SRM */ - gfx_v10_0_rlc_enable_srm(adev); + gfx_v10_0_init_csb(adev); + if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ + gfx_v10_0_rlc_enable_srm(adev); } else { adev->gfx.rlc.funcs->stop(adev); @@ -1909,9 +1888,7 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) return r; } - r = gfx_v10_0_init_pg(adev); - if (r) - return r; + gfx_v10_0_init_csb(adev); adev->gfx.rlc.funcs->start(adev); -- cgit From 7c868b592d5c2c9d54c8ab92624efcf2d3d41bc6 Mon Sep 17 00:00:00 2001 From: Yintian Tao Date: Fri, 29 Nov 2019 16:05:55 +0800 Subject: drm/amdgpu: not remove sysfs if not create sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When load amdgpu failed before create pm_sysfs and ucode_sysfs, the pm_sysfs and ucode_sysfs should not be removed. Otherwise, there will be warning call trace just like below. [ 24.836386] [drm] VCE initialized successfully. [ 24.841352] amdgpu 0000:00:07.0: amdgpu_device_ip_init failed [ 25.370383] amdgpu 0000:00:07.0: Fatal error during GPU init [ 25.889575] [drm] amdgpu: finishing device. 
[ 26.069128] amdgpu 0000:00:07.0: [drm:amdgpu_ring_test_helper [amdgpu]] *ERROR* ring kiq_2.1.0 test failed (-110) [ 26.070110] [drm:gfx_v9_0_hw_fini [amdgpu]] *ERROR* KCQ disable failed [ 26.200309] [TTM] Finalizing pool allocator [ 26.200314] [TTM] Finalizing DMA pool allocator [ 26.200349] [TTM] Zone kernel: Used memory at exit: 0 KiB [ 26.200351] [TTM] Zone dma32: Used memory at exit: 0 KiB [ 26.200353] [drm] amdgpu: ttm finalized [ 26.205329] ------------[ cut here ]------------ [ 26.205330] sysfs group 'fw_version' not found for kobject '0000:00:07.0' [ 26.205347] WARNING: CPU: 0 PID: 1228 at fs/sysfs/group.c:256 sysfs_remove_group+0x80/0x90 [ 26.205348] Modules linked in: amdgpu(OE+) gpu_sched(OE) ttm(OE) drm_kms_helper(OE) drm(OE) i2c_algo_bit fb_sys_fops syscopyarea sysfillrect sysimgblt rpcsec_gss_krb5 auth_rpcgss nfsv4 nfs lockd grace fscache binfmt_misc snd_hda_codec_generic ledtrig_audio crct10dif_pclmul snd_hda_intel crc32_pclmul snd_hda_codec ghash_clmulni_intel snd_hda_core snd_hwdep snd_pcm snd_timer input_leds snd joydev soundcore serio_raw pcspkr evbug aesni_intel aes_x86_64 crypto_simd cryptd mac_hid glue_helper sunrpc ip_tables x_tables autofs4 8139too psmouse 8139cp mii i2c_piix4 pata_acpi floppy [ 26.205369] CPU: 0 PID: 1228 Comm: modprobe Tainted: G OE 5.2.0-rc1 #1 [ 26.205370] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 [ 26.205372] RIP: 0010:sysfs_remove_group+0x80/0x90 [ 26.205374] Code: e8 35 b9 ff ff 5b 41 5c 41 5d 5d c3 48 89 df e8 f6 b5 ff ff eb c6 49 8b 55 00 49 8b 34 24 48 c7 c7 48 7a 70 98 e8 60 63 d3 ff <0f> 0b eb d7 66 90 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 55 [ 26.205375] RSP: 0018:ffffbee242b0b908 EFLAGS: 00010282 [ 26.205376] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000006 [ 26.205377] RDX: 0000000000000007 RSI: 0000000000000092 RDI: ffff97ad6f817380 [ 26.205377] RBP: ffffbee242b0b920 R08: ffffffff98f520c4 R09: 00000000000002b3 [ 26.205378] R10: ffffbee242b0b8f8 R11: 00000000000002b3 R12: ffffffffc0e58240 [ 26.205379] R13: ffff97ad6d1fe0b0 R14: ffff97ad4db954c8 R15: ffff97ad4db7fff0 [ 26.205380] FS: 00007ff3d8a1c4c0(0000) GS:ffff97ad6f800000(0000) knlGS:0000000000000000 [ 26.205381] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 26.205381] CR2: 00007f9b2ef1df04 CR3: 000000042aab8001 CR4: 00000000003606f0 [ 26.205384] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 26.205385] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 26.205385] Call Trace: [ 26.205461] amdgpu_ucode_sysfs_fini+0x18/0x20 [amdgpu] [ 26.205518] amdgpu_device_fini+0x3b4/0x560 [amdgpu] [ 26.205573] amdgpu_driver_unload_kms+0x4f/0xa0 [amdgpu] [ 26.205623] amdgpu_driver_load_kms+0xcd/0x250 [amdgpu] [ 26.205637] drm_dev_register+0x12b/0x1c0 [drm] [ 26.205695] amdgpu_pci_probe+0x12a/0x1e0 [amdgpu] [ 26.205699] local_pci_probe+0x47/0xa0 [ 26.205701] pci_device_probe+0x106/0x1b0 [ 26.205704] really_probe+0x21a/0x3f0 [ 26.205706] driver_probe_device+0x11c/0x140 [ 26.205707] device_driver_attach+0x58/0x60 [ 26.205709] __driver_attach+0xc3/0x140 Signed-off-by: Yintian Tao Acked-by: Christian König Reviewed-by: Nirmoy Das Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 145a2efd560a..2e5b2f467911 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -989,6 +989,9 @@ struct amdgpu_device { int pstate; /* enable runtime pm on the device */ bool runpm; + + bool pm_sysfs_en; + bool ucode_sysfs_en; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index aae8a29f48ad..baee37c92352 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3041,12 +3041,18 @@ fence_driver_init: amdgpu_pm_virt_sysfs_init(adev); r = amdgpu_pm_sysfs_init(adev); - if (r) + if (r) { + adev->pm_sysfs_en = false; DRM_ERROR("registering pm debugfs failed (%d).\n", r); + } else + adev->pm_sysfs_en = true; r = amdgpu_ucode_sysfs_init(adev); - if (r) + if (r) { + adev->ucode_sysfs_en = false; DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); + } else + adev->ucode_sysfs_en = true; r = amdgpu_debugfs_gem_init(adev); if (r) @@ -3146,7 +3152,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) drm_atomic_helper_shutdown(adev->ddev); } amdgpu_fence_driver_fini(adev); - amdgpu_pm_sysfs_fini(adev); + if (adev->pm_sysfs_en) + amdgpu_pm_sysfs_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_device_ip_fini(adev); if (adev->firmware.gpu_info_fw) { @@ -3182,7 +3189,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_debugfs_regs_cleanup(adev); device_remove_file(adev->dev, &dev_attr_pcie_replay_count); - amdgpu_ucode_sysfs_fini(adev); + if (adev->ucode_sysfs_en) + amdgpu_ucode_sysfs_fini(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); amdgpu_debugfs_preempt_cleanup(adev); -- cgit From a7f4ba7a6c2f0962a0db37743586c1e4a95ca72d Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Fri, 8 Nov 2019 16:57:21 -0500 Subject: drm/amd/display: Load TA firmware for navi10/12/14 load the ta firmware for navi10/12/14. 
This is already being done for raven/picasso and is needed for supporting hdcp on navi Signed-off-by: Bhawanpreet Lakha Acked-by: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 2eaa197a9404..c66ca8cc2ebd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -186,6 +186,31 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; + adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version); + adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes); + adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version); + adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes); + adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr + + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + } break; default: BUG(); -- cgit From 031514956bd06156897b54a80db66acf7c1d9ebd Mon Sep 17 00:00:00 2001 From: John Clements Date: Mon, 2 Dec 2019 17:57:25 +0800 Subject: drm/amdgpu: Added ASIC specific checks in gfxhub V1.1 get XGMI info Added max hive/node info checks per supported ASIC Reviewed-by: Hawking Zhang Signed-off-by: John Clements Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c index 5e9ab8eb214a..c0ab71df0d90 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -33,16 +33,31 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); u32 max_region = REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); + u32 max_num_physical_nodes = 0; + u32 max_physical_node_id = 0; + + switch (adev->asic_type) { + case CHIP_VEGA20: + max_num_physical_nodes = 4; + max_physical_node_id = 3; + break; + case CHIP_ARCTURUS: + max_num_physical_nodes = 8; + max_physical_node_id = 7; + break; + default: + return -EINVAL; + } /* PF_MAX_REGION=0 means xgmi is disabled */ if (max_region) { adev->gmc.xgmi.num_physical_nodes = max_region + 1; - if (adev->gmc.xgmi.num_physical_nodes > 4) + if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) return -EINVAL; adev->gmc.xgmi.physical_node_id = REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION); - if (adev->gmc.xgmi.physical_node_id > 3) + if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) return -EINVAL; adev->gmc.xgmi.node_segment_size = 
REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE), -- cgit From 0ea203a91247082f1294e6aa6e7802914665da50 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Tue, 3 Dec 2019 01:53:10 +0800 Subject: drm/amdgpu/sriov: No need the event 3 and 4 now As will call unload kms when initialize fail, and the unload kms will send event 3 and 4, so don't need event 3 and 4 in device init. Signed-off-by: Emily Deng Reviewed-by: Zhan Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index baee37c92352..dd46199287d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3018,8 +3018,6 @@ fence_driver_init: } dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); - if (amdgpu_virt_request_full_gpu(adev, false)) - amdgpu_virt_release_full_gpu(adev, false); goto failed; } -- cgit From a2c63407316cb01d1de5c0eb4177eae8b977327b Mon Sep 17 00:00:00 2001 From: Zhan liu Date: Mon, 2 Dec 2019 14:54:16 -0500 Subject: drm/amd/display: Adding NV14 IP Parameters [Why] NV14 IP Parameters are missing. [How] Add IP Parameters in. Signed-off-by: Zhan liu Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 68 ++++++++++++++++++++++ 1 file changed, 68 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index da7a92fc0909..5857ce79bcc2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -154,6 +154,74 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .xfc_fill_constant_bytes = 0, }; +struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { + .odm_capable = 1, + .gpuvm_enable = 0, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .num_dsc = 5, + .rob_buffer_size_kbytes = 168, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 84, + .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 4, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, + .line_buffer_fixed_bpp = 0, + .dcc_supported = true, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 12, + .writeback_max_vscl_taps = 12, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 5, + .max_num_dpp = 5, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 8, + .max_vscl_ratio = 8, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + 
.max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 32, // + .dppclk_delay_subtotal = 77, // + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 8, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 87, // + .dcfclk_cstate_latency = 10, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .xfc_supported = true, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .ptoi_supported = 0 +}; + struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { /* Defaults that get patched on driver load from firmware. */ .clock_limits = { -- cgit From 72b741af22d4e3dbce8a71bfd5d406b2b38ee058 Mon Sep 17 00:00:00 2001 From: Zhan liu Date: Mon, 2 Dec 2019 15:12:27 -0500 Subject: drm/amd/display: Get NV14 specific ip params as needed [Why] NV14 is using its own ip params that's different from other DCN2.0 ASICs. [How] Add ASIC revision check to make sure NV14 gets correct ip params. Signed-off-by: Zhan Liu Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 5857ce79bcc2..2ccfd84a7da4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3244,6 +3244,10 @@ static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params( uint32_t hw_internal_rev) { + /* NV14 */ + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_ip; + /* NV12 and NV10 */ return &dcn2_0_ip; } -- cgit From 0e29be9e0bbbf9cb3d718c5c48382b1420ce0749 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Nov 2019 10:02:44 -0500 Subject: drm/amd/display: re-enable wait in pipelock, but add timeout Removing this causes hangs in some games, so re-add it, but add a timeout so we don't hang while switching flip types. Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205169 Bug: https://bugs.freedesktop.org/show_bug.cgi?id=112266 Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 03e4aafb237b..d5c18fb4436a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1112,6 +1112,25 @@ void dcn20_pipe_control_lock( if (pipe->plane_state != NULL) flip_immediate = pipe->plane_state->flip_immediate; + if (flip_immediate && lock) { + const int TIMEOUT_FOR_FLIP_PENDING = 100000; + int i; + + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) + break; + udelay(1); + } + + if (pipe->bottom_pipe != NULL) { + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) + break; + udelay(1); + } + } + } + /* In flip immediate and pipe splitting case, we need to use GSL * for synchronization. Only do setup on locking and on flip type change. 
*/ -- cgit From f706ec87ea4cd193b772a85ff66a693520dbf12d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 20 Nov 2019 17:22:42 +0000 Subject: drm/amd/display: fix double assignment to msg_id field The msg_id field is being assigned twice. Fix this by replacing the second assignment with an assignment to msg_size. Addresses-Coverity: ("Unused value") Fixes: 11a00965d261 ("drm/amd/display: Add PSP block to verify HDCP2.2 steps") Reviewed-by: Harry Wentland Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 468f5e6c3487..ef4eb55f4474 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -42,7 +42,7 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp, in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; in->process.msg2_desc.msg_size = 0; in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; - in->process.msg3_desc.msg_id = 0; + in->process.msg3_desc.msg_size = 0; } enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) { -- cgit From 9c9290fa3ce16e0d81d89a7e82005cbf094d92d9 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Wed, 27 Nov 2019 17:33:38 +0800 Subject: drm/amd/powerplay: Remove unneeded variable 'result' in smu10_hwmgr.c Fixes coccicheck warning: drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c:1154:5-11: Unneeded variable: "result". Return "0" on line 1159 Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 1115761982a7..4e8ab139bb3b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1151,12 +1151,11 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, struct smu10_hwmgr *data = hwmgr->backend; struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; Watermarks_t *table = &(data->water_marks_table); - int result = 0; smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges); smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false); data->water_marks_exist = true; - return result; + return 0; } static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr) -- cgit From 98ce837d7b67bbd992f93f3489a300aa93fa41b3 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Wed, 27 Nov 2019 17:33:39 +0800 Subject: drm/amd/powerplay: Remove unneeded variable 'result' in vega10_hwmgr.c Fixes coccicheck warning: drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c:4363:5-11: Unneeded variable: "result". 
Return "0" on line 4370 Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index b29e996df1d4..4685193eca87 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -4360,14 +4360,13 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = hwmgr->backend; struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range; Watermarks_t *table = &(data->smc_state_table.water_marks_table); - int result = 0; if (!data->registry_data.disable_water_mark) { smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); data->water_marks_bitmap = WaterMarksExist; } - return result; + return 0; } static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) -- cgit From 7bb4b887d0f4161076c77e72b92af4f9d44c4269 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Wed, 27 Nov 2019 17:33:40 +0800 Subject: drm/amd/powerplay: Remove unneeded variable 'ret' in smu7_hwmgr.c Fixes coccicheck warning: drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c:5188:5-8: Unneeded variable: "ret". Return "0" on line 5196 Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c3f586613c00..d70abada66bf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -5185,13 +5185,11 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, int smu7_init_function_pointers(struct pp_hwmgr *hwmgr) { - int ret = 0; - hwmgr->hwmgr_func = &smu7_hwmgr_funcs; if (hwmgr->pp_table_version == PP_TABLE_V0) hwmgr->pptable_func = &pptable_funcs; else if (hwmgr->pp_table_version == PP_TABLE_V1) hwmgr->pptable_func = &pptable_v1_0_funcs; - return ret; + return 0; } -- cgit From 3d218c31843ef27faf5a9169bf4176651216fd77 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Wed, 27 Nov 2019 17:33:41 +0800 Subject: drm/amd/powerplay: Remove unneeded variable 'result' in vega12_hwmgr.c Fixes coccicheck warning: drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c:502:5-11: Unneeded variable: "result". Return "0" on line 515 Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 7af9ad450ac4..aca61d1ff3c2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -499,8 +499,6 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, PPCLK_e clkID, uint32_t index, uint32_t *clock) { - int result = 0; - /* *SMU expects the Clock ID to be in the top 16 bits. 
*Lower 16 bits specify the level @@ -512,7 +510,7 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, *clock = smum_get_argument(hwmgr); - return result; + return 0; } static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr, -- cgit From c7d5dfa88bc4e5a05d664eff04f6981f9fdf872d Mon Sep 17 00:00:00 2001 From: zhengbin Date: Wed, 27 Nov 2019 17:33:42 +0800 Subject: drm/amd/powerplay: Remove unneeded variable 'ret' in amdgpu_smu.c Fixes coccicheck warning: drivers/gpu/drm/amd/powerplay/amdgpu_smu.c:1192:5-8: Unneeded variable: "ret". Return "0" on line 1195 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c:1945:5-8: Unneeded variable: "ret". Return "0" on line 1961 Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 36001a4f65d7..98691d41a4e7 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1189,10 +1189,9 @@ static int smu_free_memory_pool(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *memory_pool = &smu_table->memory_pool; - int ret = 0; if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) - return ret; + return 0; amdgpu_bo_free_kernel(&memory_pool->bo, &memory_pool->mc_address, @@ -1200,7 +1199,7 @@ static int smu_free_memory_pool(struct smu_context *smu) memset(memory_pool, 0, sizeof(struct smu_table)); - return ret; + return 0; } static int smu_start_smc_engine(struct smu_context *smu) @@ -1942,7 +1941,6 @@ int smu_write_watermarks_table(struct smu_context *smu) int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) { - int ret = 0; struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; void *table = watermarks->cpu_addr; @@ -1958,7 +1956,7 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, mutex_unlock(&smu->mutex); - return ret; + return 0; } const struct amd_ip_funcs smu_ip_funcs = { -- cgit From 4c56feac0d47b0cf620a8f2e62c1c9cda8e22e54 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 28 Nov 2019 10:31:37 +0800 Subject: drm/amd/display: Remove unneeded semicolon in bios_parser.c Fixes coccicheck warning: drivers/gpu/drm/amd/display/dc/bios/bios_parser.c:2192:2-3: Unneeded semicolon Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 27451f2a938b..008d4d11339d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2189,7 +2189,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id) break; default: break; - }; + } /* Unidentified device ID, return empty support mask. 
*/ return 0; -- cgit From c09f5e000d1e96de95da1aae2ce7ba25d3d207a7 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 28 Nov 2019 10:31:38 +0800 Subject: drm/amd/display: Remove unneeded semicolon in bios_parser2.c Fixes coccicheck warning: drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c:995:2-3: Unneeded semicolon Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index eb06ee765c78..4e24717ee87b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -992,7 +992,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id) break; default: break; - }; + } /* Unidentified device ID, return empty support mask. */ return 0; -- cgit From 2fd4f169030a4e735af556b14586c54298acc457 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 28 Nov 2019 10:31:39 +0800 Subject: drm/amd/display: Remove unneeded semicolon in hdcp.c Fixes coccicheck warning: drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c:506:2-3: Unneeded semicolon Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index cbb5e9c063ec..8aa528e874c4 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -503,7 +503,7 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( break; default: break; - }; + } return mode; } -- cgit From f7a8b4739d94f05d4bb84b62d43157dd1bc3bc32 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Thu, 28 Nov 2019 10:31:40 +0800 Subject: drm/amd/display: Remove unneeded semicolon in display_rq_dlg_calc_21.c Fixes coccicheck warning: drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c:1525:144-145: Unneeded semicolon drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c:1526:142-143: Unneeded semicolon Reviewed-by: Harry Wentland Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a4b103eb4b02..e60af383b4db 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1522,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params( disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; - disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; - disp_dlg_regs->refcyc_per_vm_req_flip = 
get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; + disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; + disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // Clamp to max for now if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23)) -- cgit From a501e22c8e36934b2d901b039b1c59c948886c90 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 2 Dec 2019 15:47:38 +0000 Subject: drm/amd/display: remove redundant assignment to variable v_total The variable v_total is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 16e69bbc69aa..fa57885503d4 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -122,7 +122,7 @@ static unsigned int calc_v_total_from_refresh( const struct dc_stream_state *stream, unsigned int refresh_in_uhz) { - unsigned int v_total = stream->timing.v_total; + unsigned int v_total; unsigned int frame_duration_in_ns; frame_duration_in_ns = -- cgit From 96358810c66a35b15303c4c2dd0ee48f223c221e Mon Sep 17 00:00:00 2001 From: Jack Zhang Date: Mon, 2 Dec 2019 18:41:36 +0800 Subject: amd/amdgpu/sriov swSMU disable for sriov For boards greater than ARCTURUS under the sriov platform, swSMU is not supported because the smu ip block is commented out in the guest driver. Generally for sriov, initialization of smu is moved to the host driver. Thus, smu sw_init and hw_init will not be executed in the guest driver. Without the sw structure being initialized in the guest driver, swSMU cannot be declared as supported. Signed-off-by: Jack Zhang Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 98691d41a4e7..0cc37cfe6eb0 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -530,9 +530,12 @@ bool is_support_sw_smu(struct amdgpu_device *adev) { if (adev->asic_type == CHIP_VEGA20) return (amdgpu_dpm == 2) ? true : false; - else if (adev->asic_type >= CHIP_ARCTURUS) - return true; - else + else if (adev->asic_type >= CHIP_ARCTURUS) { + if (amdgpu_sriov_vf(adev)) + return false; + else + return true; + } else return false; } -- cgit From c64ab8280e61fa55543de9dced87043814b44024 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 2 Dec 2019 13:16:09 +0800 Subject: drm/amdgpu: drop asd shared memory asd shared memory is not needed since the driver doesn't invoke any further cmd to asd directly after the asd loading. The 
trust application is the one who needs to talk to asd after the initialization Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 44 +++++++++------------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 12 +++++---- 2 files changed, 18 insertions(+), 38 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 525a8dd43f1e..96a88d2e6c6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -318,35 +318,17 @@ static int psp_tmr_load(struct psp_context *psp) return ret; } -static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t asd_mc, uint64_t asd_mc_shared, - uint32_t size, uint32_t shared_size) +static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t asd_mc, uint32_t size) { cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); cmd->cmd.cmd_load_ta.app_len = size; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; -} - -static int psp_asd_init(struct psp_context *psp) -{ - int ret; - - /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for shared ASD <-> Driver - */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &psp->asd_shared_bo, - &psp->asd_shared_mc_addr, - &psp->asd_shared_buf); - - return ret; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; + cmd->cmd.cmd_load_ta.cmd_buf_len = 0; } static int psp_asd_load(struct psp_context *psp) @@ -368,11 +350,15 @@ static int psp_asd_load(struct psp_context *psp) memset(psp->fw_pri_buf, 0, PSP_1_MEG); memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); - psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr, - psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE); + psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->asd_ucode_size); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (!ret) { + psp->asd_context.asd_initialized = true; + psp->asd_context.session_id = cmd->resp.session_id; + } kfree(cmd); @@ -1211,12 +1197,6 @@ static int psp_hw_start(struct psp_context *psp) return ret; } - ret = psp_asd_init(psp); - if (ret) { - DRM_ERROR("PSP asd init failed!\n"); - return ret; - } - ret = psp_asd_load(psp); if (ret) { DRM_ERROR("PSP load asd failed!\n"); @@ -1630,8 +1610,6 @@ static int psp_hw_fini(void *handle) &psp->fw_pri_mc_addr, &psp->fw_pri_buf); amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); - amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr, - &psp->asd_shared_buf); amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, (void **)&psp->cmd_buf_mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 40594f27dab1..5f8fd3e3535b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -32,7 +32,6 @@ #define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000 -#define PSP_ASD_SHARED_MEM_SIZE 0x4000 #define PSP_XGMI_SHARED_MEM_SIZE 0x4000 #define 
PSP_RAS_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 @@ -130,6 +129,11 @@ struct psp_xgmi_topology_info { struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; }; +struct psp_asd_context { + bool asd_initialized; + uint32_t session_id; +}; + struct psp_xgmi_context { uint8_t initialized; uint32_t session_id; @@ -238,15 +242,12 @@ struct psp_context struct amdgpu_bo *tmr_bo; uint64_t tmr_mc_addr; - /* asd firmware and buffer */ + /* asd firmware */ const struct firmware *asd_fw; uint32_t asd_fw_version; uint32_t asd_feature_version; uint32_t asd_ucode_size; uint8_t *asd_start_addr; - struct amdgpu_bo *asd_shared_bo; - uint64_t asd_shared_mc_addr; - void *asd_shared_buf; /* fence buffer */ struct amdgpu_bo *fence_buf_bo; @@ -281,6 +282,7 @@ struct psp_context uint32_t ta_dtm_ucode_size; uint8_t *ta_dtm_start_addr; + struct psp_asd_context asd_context; struct psp_xgmi_context xgmi_context; struct psp_ras_context ras; struct psp_hdcp_context hdcp_context; -- cgit From 71e5f0cb936526d4f84a1353e8174a45eeab747b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 2 Dec 2019 13:37:42 +0800 Subject: drm/amdgpu: unload asd in psp hw de-init phase Issue unload_ta_cmd to tOS to unload the asd driver. Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 36 +++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 96a88d2e6c6c..6ff5e78bf80b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -365,6 +365,40 @@ static int psp_asd_load(struct psp_context *psp) return ret; } +static void psp_prep_asd_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t asd_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = asd_session_id; +} + +static int psp_asd_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->asd_context.asd_initialized) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_asd_unload_cmd_buf(cmd, psp->asd_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + if (!ret) + psp->asd_context.asd_initialized = false; + + kfree(cmd); + + return ret; +} + static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, uint32_t id, uint32_t value) { @@ -1602,6 +1636,8 @@ static int psp_hw_fini(void *handle) psp_hdcp_terminate(psp); } + psp_asd_unload(psp); + psp_ring_destroy(psp, PSP_RING_TYPE__KM); pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; -- cgit From 7091b60cadae8353866084f3d2d61affa66f6473 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 2 Dec 2019 13:44:38 +0800 Subject: drm/amdgpu: load np fw prior before loading the TAs Platform TAs will independently toggle DF Cstate; for instance, get/set topology from the xgmi ta, or do error injection from the ras ta. In such cases, PMFW needs to be loaded before the TAs so that all the subsequent Cstate calls received by PSP FW can be routed to PMFW. 
Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 66 ++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 6ff5e78bf80b..c84529718bcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1231,39 +1231,6 @@ static int psp_hw_start(struct psp_context *psp) return ret; } - ret = psp_asd_load(psp); - if (ret) { - DRM_ERROR("PSP load asd failed!\n"); - return ret; - } - - if (adev->gmc.xgmi.num_physical_nodes > 1) { - ret = psp_xgmi_initialize(psp); - /* Warning the XGMI seesion initialize failure - * Instead of stop driver initialization - */ - if (ret) - dev_err(psp->adev->dev, - "XGMI: Failed to initialize XGMI session\n"); - } - - if (psp->adev->psp.ta_fw) { - ret = psp_ras_initialize(psp); - if (ret) - dev_err(psp->adev->dev, - "RAS: Failed to initialize RAS\n"); - - ret = psp_hdcp_initialize(psp); - if (ret) - dev_err(psp->adev->dev, - "HDCP: Failed to initialize HDCP\n"); - - ret = psp_dtm_initialize(psp); - if (ret) - dev_err(psp->adev->dev, - "DTM: Failed to initialize DTM\n"); - } - return 0; } @@ -1579,6 +1546,39 @@ skip_memalloc: if (ret) goto failed; + ret = psp_asd_load(psp); + if (ret) { + DRM_ERROR("PSP load asd failed!\n"); + return ret; + } + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + ret = psp_xgmi_initialize(psp); + /* Warning the XGMI seesion initialize failure + * Instead of stop driver initialization + */ + if (ret) + dev_err(psp->adev->dev, + "XGMI: Failed to initialize XGMI session\n"); + } + + if (psp->adev->psp.ta_fw) { + ret = psp_ras_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "HDCP: Failed to initialize HDCP\n"); + + ret = psp_dtm_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "DTM: Failed to initialize DTM\n"); + } + return 0; failed: -- cgit From f275cde7066a27ab7afa0a1d95390dbf237726a2 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Mon, 2 Dec 2019 15:04:35 +0800 Subject: drm/amdgpu/powerplay: unify smu send message function Drop the smu_send_smc_msg function from the ASIC-specific structure. Reuse the smu_send_smc_msg_with_param function for smu_send_smc_msg. Set the parameter to 0 for the smu_send_msg function, otherwise it will send the previous parameter value (not a deterministic value). Materialize the msg type for the smu send message function definition. 
Signed-off-by: Likun Gao Reviewed-by: Kevin Wang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 9 ++++++++ drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 1 - drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 4 ++-- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 5 ++--- drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 5 ++--- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 1 - drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 1 - drivers/gpu/drm/amd/powerplay/smu_internal.h | 4 ++-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 29 ++------------------------ drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 28 ++----------------------- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 1 - 11 files changed, 21 insertions(+), 67 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 0cc37cfe6eb0..c30f9a281e4c 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -2568,3 +2568,12 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu) return ret; } + +int smu_send_smc_msg(struct smu_context *smu, + enum smu_message_type msg) +{ + int ret; + + ret = smu_send_smc_msg_with_param(smu, msg, 0); + return ret; +} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 68107de7435c..3f13986d8e50 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -2137,7 +2137,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .set_tool_table_location = smu_v11_0_set_tool_table_location, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, - .send_smc_msg = smu_v11_0_send_msg, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index ada4a8dc4112..ca3fdc6777cf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -500,8 +500,8 @@ struct pptable_funcs { int (*notify_memory_pool_location)(struct smu_context *smu); int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); int (*system_features_control)(struct smu_context *smu, bool en); - int (*send_smc_msg)(struct smu_context *smu, uint16_t msg); - int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param); + int (*send_smc_msg_with_param)(struct smu_context *smu, + enum smu_message_type msg, uint32_t param); int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg); int (*init_display_count)(struct smu_context *smu, uint32_t count); int (*set_allowed_mask)(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 5a277136f2aa..786de7741990 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -177,10 +177,9 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu); int smu_v11_0_system_features_control(struct smu_context *smu, bool en); -int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg); - int -smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, +smu_v11_0_send_msg_with_param(struct smu_context *smu, + 
enum smu_message_type msg, uint32_t param); int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index 44c65dd8850d..922973b7e29f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -44,10 +44,9 @@ int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg); int smu_v12_0_wait_for_response(struct smu_context *smu); -int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg); - int -smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, +smu_v12_0_send_msg_with_param(struct smu_context *smu, + enum smu_message_type msg, uint32_t param); int smu_v12_0_check_fw_status(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index f8422462e3b5..15403b7979d6 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -2083,7 +2083,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_tool_table_location = smu_v11_0_set_tool_table_location, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, - .send_smc_msg = smu_v11_0_send_msg, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index b44ce143e895..b076589c79ef 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -800,7 +800,6 @@ static const struct pptable_funcs renoir_ppt_funcs = { .powergate_sdma = smu_v12_0_powergate_sdma, .powergate_vcn = smu_v12_0_powergate_vcn, .powergate_jpeg = smu_v12_0_powergate_jpeg, - .send_smc_msg = smu_v12_0_send_msg, .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, .read_smc_arg = smu_v12_0_read_arg, .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index b2d81d3490cd..60ce1fccaeb5 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -77,8 +77,8 @@ #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_send_smc_msg(smu, msg) \ - ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0) +int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); + #define smu_send_smc_msg_with_param(smu, msg, param) \ ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) #define smu_read_smc_arg(smu, arg) \ diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index b90eb8172849..eae3657cf1f3 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -91,36 +91,11 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 
0 : -EIO; } -int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) -{ - struct amdgpu_device *adev = smu->adev; - int ret = 0, index = 0; - - index = smu_msg_get_index(smu, msg); - if (index < 0) - return index; - - smu_v11_0_wait_for_response(smu); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index); - - ret = smu_v11_0_wait_for_response(smu); - - if (ret) - pr_err("failed send message: %10s (%d) response %#x\n", - smu_get_message_name(smu, msg), index, ret); - - return ret; - -} - int -smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, +smu_v11_0_send_msg_with_param(struct smu_context *smu, + enum smu_message_type msg, uint32_t param) { - struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 045167311ae8..269a7d73b58d 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -77,33 +77,9 @@ int smu_v12_0_wait_for_response(struct smu_context *smu) return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO; } -int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) -{ - struct amdgpu_device *adev = smu->adev; - int ret = 0, index = 0; - - index = smu_msg_get_index(smu, msg); - if (index < 0) - return index; - - smu_v12_0_wait_for_response(smu); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index); - - ret = smu_v12_0_wait_for_response(smu); - - if (ret) - pr_err("Failed to send message 0x%x, response 0x%x\n", index, - ret); - - return ret; - -} - int -smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, +smu_v12_0_send_msg_with_param(struct smu_context *smu, + enum smu_message_type msg, uint32_t param) { struct amdgpu_device *adev = smu->adev; diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 02ede5c8b73a..12bcc3e3ba99 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -3231,7 +3231,6 @@ static const struct pptable_funcs vega20_ppt_funcs = { .set_tool_table_location = smu_v11_0_set_tool_table_location, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, - .send_smc_msg = smu_v11_0_send_msg, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, -- cgit From 6dcab16b4118b3ef3bcefdf67d7c010aeb90a644 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Mon, 2 Dec 2019 23:12:10 -0500 Subject: drm/amdkfd: Contain MMHUB number in mmhub_v9_4_setup_vm_pt_regs() Adjust the exposed function prototype so that the caller does not need to know the MMHUB number. 
Signed-off-by: Yong Zhao Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 6 ++---- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 8 -------- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 14 ++++++++++++-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 2 ++ 4 files changed, 16 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 47c853ef1051..6f1a4676ddde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -40,7 +40,7 @@ #include "soc15d.h" #include "mmhub_v1_0.h" #include "gfxhub_v1_0.h" -#include "gmc_v9_0.h" +#include "mmhub_v9_4.h" enum hqd_dequeue_request_type { @@ -774,9 +774,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi * on GFX8 and older. */ if (adev->asic_type == CHIP_ARCTURUS) { - /* Two MMHUBs */ - mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base); - mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base); + mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base); } else mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index 971c0840358f..49e8be761214 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -36,12 +36,4 @@ extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; - -/* amdgpu_amdkfd*.c */ -void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t value); -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t value); -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, - uint32_t vmid, uint64_t value); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 8599bfdb9a9e..d9301e80522a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -54,7 +54,7 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) return base; } -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, +static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid, uint32_t vmid, uint64_t value) { /* two registers distance between mmVML2VC0_VM_CONTEXT0_* to @@ -80,7 +80,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, { uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - mmhub_v9_4_setup_vm_pt_regs(adev, hubid, 0, pt_base); + mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -101,6 +101,16 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, (u32)(adev->gmc.gart_end >> 44)); } +void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) +{ + int i; + + for (i = 0; i < MMHUB_NUM_INSTANCES; i++) + mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid, + page_table_base); +} + static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, int hubid) { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h index 354a4b7e875b..1b979773776c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h @@ 
-34,5 +34,7 @@ void mmhub_v9_4_init(struct amdgpu_device *adev); int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state); void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags); +void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif -- cgit From 3e0c55f3b7be18d6bcd70b489594d54c7f9f081a Mon Sep 17 00:00:00 2001 From: Zhan Liu Date: Tue, 3 Dec 2019 12:46:01 -0500 Subject: drm/amd/display: Loading NV10/14 Bounding Box Data Directly From Code [Why] NV10/14 have been released. It's time to get the NV10/14 bounding box directly from code. [How] Retrieve NV10/14 bounding box data directly from code. Signed-off-by: Zhan Liu Reviewed-by: Hersen Wu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 2ccfd84a7da4..2a158ff5f0a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -83,8 +83,6 @@ #include "amdgpu_socbb.h" -/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */ -#define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) struct _vcs_dpi_ip_params_st dcn2_0_ip = { @@ -3271,12 +3269,13 @@ static bool init_soc_bounding_box(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); - if (!bb && !SOC_BOUNDING_BOX_VALID) { + /* TODO: upstream NV12 bounding box when its launched */ + if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); return false; } - if (bb && !SOC_BOUNDING_BOX_VALID) { + if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { int i; dcn2_0_nv12_soc.sr_exit_time_us = -- cgit From 79c4c8ea913076338e70252f947a8805daa8f91b Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Wed, 4 Dec 2019 15:51:16 +0800 Subject: drm/amdgpu: add check before enabling/disabling broadcast mode When a security violation happens with a new vbios, the data fabric may stop working. So prevent direct access to DF mmFabricConfigAccessControl from the new vbios onwards. 
Signed-off-by: Guchun Chen Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 38 +++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 16fbd2bc8ad1..4043ebcea5de 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -268,23 +268,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, { u32 tmp; - /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } else { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_DISABLE; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } + if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); + + if (enable) { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_DISABLE; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } - /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); + /* Exit broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); + } } static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, -- cgit From f83f5a1e115c8dc382a5abaaf0c10374fbcf1038 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 3 Dec 2019 15:40:10 -0500 Subject: drm/amdgpu/gfx: Improvement on EDC GPR workarounds SPI limits total CS waves in flight per SE to no more than 32 * num_cu and we need to stuff 40 waves on a CU to completely clean the SGPR. This is accomplished in the WR by cleaning the SE in two steps, half of the CU per step. 
Signed-off-by: James Zhu Reviewed-by: Yong Zhao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 83 ++++++++++++++++++++++++++--------- 1 file changed, 63 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1743fe281354..2616f1b59bbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3938,24 +3938,37 @@ static const struct soc15_reg_entry vgpr_init_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */ - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs, SGPRS=1 (16 SGPRs, BULKY=1 */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ }; -static const struct soc15_reg_entry sgpr_init_regs[] = { - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */ - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 }, +static const struct soc15_reg_entry sgpr1_init_regs[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, +}; + +static const struct soc15_reg_entry sgpr2_init_regs[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 
}, /* (80 GPRS) */ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, }; @@ -4065,7 +4078,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) total_size = ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4; total_size += - ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4; + ((ARRAY_SIZE(sgpr1_init_regs) * 3) + 4 + 5 + 2) * 4; + total_size += + ((ARRAY_SIZE(sgpr2_init_regs) * 3) + 4 + 5 + 2) * 4; total_size = ALIGN(total_size, 256); vgpr_offset = total_size; total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); @@ -4108,7 +4123,35 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 256; /* x */ + ib.ptr[ib.length_dw++] = 0x40*2; /* x */ + ib.ptr[ib.length_dw++] = 1; /* y */ + ib.ptr[ib.length_dw++] = 1; /* z */ + ib.ptr[ib.length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* write CS partial flush packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); + ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); + + /* SGPR1 */ + /* write the register state for the compute dispatch */ + for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i++) { + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i]) + - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value; + } + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO) + - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); + ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); + + /* write dispatch packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4118,13 +4161,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); - /* SGPR */ + /* SGPR2 */ /* write the register state for the compute dispatch */ - for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) { + for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); - ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i]) + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i]) - PACKET3_SET_SH_REG_START; - ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value; + ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value; } /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; @@ -4136,7 +4179,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 256; /* x */ + ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = -- cgit From a5a4d68c9326bba7d6ab414a6c1a404f70ab29b1 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Nov 2019 00:30:49 -0500 Subject: drm/amdkfd: Eliminate unnecessary kernel queue function pointers 
Up to this point, those functions are all the same for all ASICs, so no need to call them by functions pointers. Removing the function pointers will greatly increase the code readablity. If there is ever need for those function pointers, we can add it back then. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 8 +++--- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 30 ++++++++++------------ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 34 +++++++------------------ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 28 ++++++++++---------- 4 files changed, 41 insertions(+), 59 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index 1d33c4f25263..27bcc5b472f6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, * The receive packet buff will be sitting on the Indirect Buffer * and in the PQ we put the IB packet + sync packet(s). */ - status = kq->ops.acquire_packet_buffer(kq, + status = kq_acquire_packet_buffer(kq, pq_packets_size_in_bytes / sizeof(uint32_t), &ib_packet_buff); if (status) { - pr_err("acquire_packet_buffer failed\n"); + pr_err("kq_acquire_packet_buffer failed\n"); return status; } @@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, if (status) { pr_err("Failed to allocate GART memory\n"); - kq->ops.rollback_packet(kq); + kq_rollback_packet(kq); return status; } @@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, rm_packet->data_lo = QUEUESTATE__ACTIVE; - kq->ops.submit_packet(kq); + kq_submit_packet(kq); /* Wait till CP writes sync code: */ status = amdkfd_fence_wait_timeout( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 59ee9053498c..2d56dc534459 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -34,7 +34,10 @@ #define PM4_COUNT_ZERO (((1 << 15) - 1) << 16) -static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, +/* Initialize a kernel queue, including allocations of GART memory + * needed for the queue. + */ +static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size) { struct queue_properties prop; @@ -88,7 +91,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->pq_gpu_addr = kq->pq->gpu_addr; /* For CIK family asics, kq->eop_mem is not needed */ - if (dev->device_info->asic_family > CHIP_HAWAII) { + if (dev->device_info->asic_family > CHIP_MULLINS) { retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); if (retval != 0) goto err_eop_allocate_vidmem; @@ -191,7 +194,8 @@ err_get_kernel_doorbell: } -static void uninitialize(struct kernel_queue *kq) +/* Uninitialize a kernel queue and free all its memory usages. 
*/ +static void kq_uninitialize(struct kernel_queue *kq) { if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) kq->mqd_mgr->destroy_mqd(kq->mqd_mgr, @@ -220,7 +224,7 @@ static void uninitialize(struct kernel_queue *kq) uninit_queue(kq->queue); } -static int acquire_packet_buffer(struct kernel_queue *kq, +int kq_acquire_packet_buffer(struct kernel_queue *kq, size_t packet_size_in_dwords, unsigned int **buffer_ptr) { size_t available_size; @@ -281,7 +285,7 @@ err_no_space: return -ENOMEM; } -static void submit_packet(struct kernel_queue *kq) +void kq_submit_packet(struct kernel_queue *kq) { #ifdef DEBUG int i; @@ -304,7 +308,7 @@ static void submit_packet(struct kernel_queue *kq) } } -static void rollback_packet(struct kernel_queue *kq) +void kq_rollback_packet(struct kernel_queue *kq) { if (kq->dev->device_info->doorbell_size == 8) { kq->pending_wptr64 = *kq->wptr64_kernel; @@ -324,13 +328,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, if (!kq) return NULL; - kq->ops.initialize = initialize; - kq->ops.uninitialize = uninitialize; - kq->ops.acquire_packet_buffer = acquire_packet_buffer; - kq->ops.submit_packet = submit_packet; - kq->ops.rollback_packet = rollback_packet; - - if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) + if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) return kq; pr_err("Failed to init kernel queue\n"); @@ -341,7 +339,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, void kernel_queue_uninit(struct kernel_queue *kq) { - kq->ops.uninitialize(kq); + kq_uninitialize(kq); kfree(kq); } @@ -361,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) return; } - retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer); + retval = kq_acquire_packet_buffer(kq, 5, &buffer); if (unlikely(retval != 0)) { pr_err(" Failed to acquire packet buffer\n"); pr_err("Kernel queue test failed\n"); @@ -369,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) } for (i = 0; i < 5; i++) buffer[i] = kq->nop_packet; - kq->ops.submit_packet(kq); + kq_submit_packet(kq); pr_err("Ending kernel queue test\n"); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 475e9499c0af..f4cfe9f1871c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -29,44 +29,28 @@ #include "kfd_priv.h" /** - * struct kernel_queue_ops - * - * @initialize: Initialize a kernel queue, including allocations of GART memory - * needed for the queue. - * - * @uninitialize: Uninitialize a kernel queue and free all its memory usages. - * - * @acquire_packet_buffer: Returns a pointer to the location in the kernel + * kq_acquire_packet_buffer: Returns a pointer to the location in the kernel * queue ring buffer where the calling function can write its packet. It is * Guaranteed that there is enough space for that packet. It also updates the * pending write pointer to that location so subsequent calls to * acquire_packet_buffer will get a correct write pointer * - * @submit_packet: Update the write pointer and doorbell of a kernel queue. - * - * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel - * queue are equal, which means the CP has read all the submitted packets. + * kq_submit_packet: Update the write pointer and doorbell of a kernel queue. 
* - * @rollback_packet: This routine is called if we failed to build an acquired + * kq_rollback_packet: This routine is called if we failed to build an acquired * packet for some reason. It just overwrites the pending wptr with the current * one * */ -struct kernel_queue_ops { - bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); - void (*uninitialize)(struct kernel_queue *kq); - int (*acquire_packet_buffer)(struct kernel_queue *kq, - size_t packet_size_in_dwords, - unsigned int **buffer_ptr); - void (*submit_packet)(struct kernel_queue *kq); - void (*rollback_packet)(struct kernel_queue *kq); -}; +int kq_acquire_packet_buffer(struct kernel_queue *kq, + size_t packet_size_in_dwords, + unsigned int **buffer_ptr); +void kq_submit_packet(struct kernel_queue *kq); +void kq_rollback_packet(struct kernel_queue *kq); -struct kernel_queue { - struct kernel_queue_ops ops; +struct kernel_queue { /* data */ struct kfd_dev *dev; struct mqd_manager *mqd_mgr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 700be4f80867..6cabed06ef5d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -278,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm, size = pm->pmf->set_resources_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { @@ -289,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm, retval = pm->pmf->set_resources(pm, buffer, res); if (!retval) - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); else - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); out: mutex_unlock(&pm->lock); @@ -316,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t); mutex_lock(&pm->lock); - retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + retval = kq_acquire_packet_buffer(pm->priv_queue, packet_size_dwords, &rl_buffer); if (retval) goto fail_acquire_packet_buffer; @@ -326,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) if (retval) goto fail_create_runlist; - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); mutex_unlock(&pm->lock); return retval; fail_create_runlist: - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); fail_acquire_packet_buffer: mutex_unlock(&pm->lock); fail_create_runlist_ib: @@ -352,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, size = pm->pmf->query_status_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { pr_err("Failed to allocate buffer on kernel queue\n"); @@ -362,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value); if (!retval) - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); else - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); out: 
mutex_unlock(&pm->lock); @@ -381,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, size = pm->pmf->unmap_queues_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { pr_err("Failed to allocate buffer on kernel queue\n"); @@ -392,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param, reset, sdma_engine); if (!retval) - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); else - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); out: mutex_unlock(&pm->lock); @@ -439,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm) size = pm->pmf->query_status_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { pr_err("Failed to allocate buffer on kernel queue\n"); @@ -447,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm) goto out; } memset(buffer, 0x55, size); - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.", buffer[0], buffer[1], buffer[2], buffer[3], -- cgit From 80c5a807a51f2d19df4a5a5d12cf9d4997993999 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 4 Dec 2019 22:07:49 -0500 Subject: drm/amdgpu: add header line for power profile on Arcturus So the output is consistent with other asics. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 3f13986d8e50..199c96caa882 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -1320,12 +1320,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, "VR", "COMPUTE", "CUSTOM"}; + static const char *title[] = { + "PROFILE_INDEX(NAME)"}; uint32_t i, size = 0; int16_t workload_type = 0; if (!smu->pm_enabled || !buf) return -EINVAL; + size += sprintf(buf + size, "%16s\n", + title[0]); + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT -- cgit From b456c93253bde374057bfca8f74ada527250d5d9 Mon Sep 17 00:00:00 2001 From: Pierre-Eric Pelloux-Prayer Date: Thu, 28 Nov 2019 12:08:58 +0100 Subject: drm/amdgpu: add cache flush workaround to gfx8 emit_fence The same workaround is used for gfx7. Both PAL and Mesa use it for gfx8 too, so port this commit to gfx_v8_0_ring_emit_fence_gfx. 
Signed-off-by: Pierre-Eric Pelloux-Prayer Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 9b0b22522e83..cfc1403fc855 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6145,7 +6145,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* Workaround for cache flush problems. First send a dummy EOP + * event down the pipe with seq one below. + */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + amdgpu_ring_write(ring, lower_32_bits(seq - 1)); + amdgpu_ring_write(ring, upper_32_bits(seq - 1)); + + /* Then send the real EOP event down the pipe: + * EVENT_WRITE_EOP - flush caches, send int */ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -6887,7 +6903,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 5 + /* COND_EXEC */ 7 + /* PIPELINE_SYNC */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */ - 8 + /* FENCE for VM_FLUSH */ + 12 + /* FENCE for VM_FLUSH */ 20 + /* GDS switch */ 4 + /* double SWITCH_BUFFER, the first COND_EXEC jump to the place just @@ -6899,7 +6915,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 31 + /* DE_META */ 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ - 8 + 8 + /* FENCE x2 */ + 12 + 12 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, -- cgit From 4a2d93565ada87a3a2d72b1ab26335ea785b3d3c Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 22 Oct 2019 02:41:26 +0800 Subject: drm/amdgpu: remove ras global recovery handling from ras_controller_int handler v2: add notification when ras controller interrupt generates Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 0db458f9fafc..25231d699341 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -324,7 +324,12 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device RAS_CNTLR_INTERRUPT_CLEAR, 1); WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); - amdgpu_ras_global_ras_isr(adev); + DRM_WARN("RAS controller interrupt triggered by NBIF error\n"); + + /* ras_controller_int is dedicated for nbif ras error, + * not the global interrupt for sync flood + */ + amdgpu_ras_reset_gpu(adev, true); } } -- cgit From f2a79be1c094f4d664b6a3fbfd9b5a61f8ff7f02 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 25 Nov 2019 12:26:09 +0800 Subject: drm/amdgpu: export amdgpu_ras_find_obj to use externally Change it to external 
interface. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 5 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 3 +++ 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1593564578b0..04394c45aa03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -198,9 +198,6 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return 0; } -static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, - struct ras_common_if *head); - /** * DOC: AMDGPU RAS debugfs control interface * @@ -445,7 +442,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, } /* return an obj equal to head, or the first when head is NULL */ -static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, +struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, struct ras_common_if *head) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index f80fd3428c98..a2c1ac1b9572 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -611,6 +611,9 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct ras_dispatch_if *info); +struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, + struct ras_common_if *head); + extern atomic_t amdgpu_ras_in_intr; static inline bool amdgpu_ras_intr_triggered(void) -- cgit From 28f87950d935eec2ba1076933535213f4f5c8a06 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 22 Nov 2019 17:56:47 +0800 Subject: drm/amdgpu: clear ras controller status registers when interrupt occurs Fix the issue that the ras controller interrupt cannot be triggered anymore after a one-time nbif uncorrectable error. The error count is stored in the nbif ras object for query.
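For context, the point of stashing the counts is that the query side never has to touch the hardware again: the interrupt handler fills the nbif ras object's err_data, and a later query simply reads the cached values. A rough, illustrative sketch of that consumer side (not part of this patch; it reuses amdgpu RAS helpers and fields, but the function itself is hypothetical):

/* Illustrative only: read back the counts that the ras_controller
 * interrupt handler stored in the nbif ras object.
 */
static void example_report_nbif_errors(struct amdgpu_device *adev)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);

        if (!obj)
                return;

        /* err_data was filled by nbio_v7_4_query_ras_error_count() in the
         * interrupt path, so this is just a cached read, no register access.
         */
        DRM_INFO("nbif RAS errors: %lu correctable, %lu uncorrectable\n",
                 obj->err_data.ce_count, obj->err_data.ue_count);
}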
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 25231d699341..9a3a65a0691c 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -52,6 +52,9 @@ #define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL #define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); + static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, @@ -314,6 +317,7 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev) static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) { uint32_t bif_doorbell_intr_cntl; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if); bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); if (REG_GET_FIELD(bif_doorbell_intr_cntl, @@ -324,6 +328,12 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device RAS_CNTLR_INTERRUPT_CLEAR, 1); WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + /* + * clear error status after ras_controller_intr according to + * hw team and count ue number for query + */ + nbio_v7_4_query_ras_error_count(adev, &obj->err_data); + DRM_WARN("RAS controller interrupt triggered by NBIF error\n"); /* ras_controller_int is dedicated for nbif ras error, -- cgit From 5c39d600e315ce1a0d13c491693c0390d40c94f3 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 22 Nov 2019 18:39:11 +0800 Subject: drm/amdgpu: clear uncorrectable parity error status bit This should be cleared during every nbif uncorrectable error cleanup work. 
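The clear sequence follows the common read-then-write-back pattern: the handler samples the parity status register and, when a fatal error is latched, writes the sampled value back so the latched bits are acknowledged. A minimal sketch of that pattern (the write-1-to-clear semantics of this register are an assumption here, inferred from writing back the value that was read):

        /* Sketch: acknowledge latched bits in a (presumed) write-1-to-clear
         * status register by writing back exactly the value that was read.
         */
        u32 parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

        if (parity_sts)
                WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts);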
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 9a3a65a0691c..bb701dbfd472 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -482,10 +482,12 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a return 0; } +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 + static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - uint32_t global_sts, central_sts, int_eoi; + uint32_t global_sts, central_sts, int_eoi, parity_sts; uint32_t corr, fatal, non_fatal; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -494,6 +496,7 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal); non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrNonFatal); + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2); if (corr) err_data->ce_count++; @@ -505,6 +508,11 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, /* clear error status register */ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + if (fatal) + /* clear parity fatal error indication field */ + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, + parity_sts); + if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS, BIFL_RasContller_Intr_Recv)) { /* clear interrupt status register */ -- cgit From 7a22677b9514d0e819eb85d687eb8d8ef10ab330 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 26 Nov 2019 17:24:56 +0800 Subject: drm/amdgpu: enable/disable doorbell interrupt in baco entry/exit helper This operation is needed when baco entry/exit for ras recovery Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index dd46199287d8..fc53faac4147 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4306,10 +4306,14 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) int amdgpu_device_baco_enter(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); if (!amdgpu_device_supports_baco(adev->ddev)) return -ENOTSUPP; + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, false); + if (is_support_sw_smu(adev)) { struct smu_context *smu = &adev->smu; int ret; @@ -4317,8 +4321,6 @@ int amdgpu_device_baco_enter(struct drm_device *dev) ret = smu_baco_enter(smu); if (ret) return ret; - - return 0; } else { void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -4329,14 +4331,15 @@ int amdgpu_device_baco_enter(struct drm_device *dev) /* enter BACO state */ if (pp_funcs->set_asic_baco_state(pp_handle, 1)) return -EIO; - - return 0; } + + return 0; } int amdgpu_device_baco_exit(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); if 
(!amdgpu_device_supports_baco(adev->ddev)) return -ENOTSUPP; @@ -4349,7 +4352,6 @@ int amdgpu_device_baco_exit(struct drm_device *dev) if (ret) return ret; - return 0; } else { void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -4360,7 +4362,10 @@ int amdgpu_device_baco_exit(struct drm_device *dev) /* exit BACO state */ if (pp_funcs->set_asic_baco_state(pp_handle, 0)) return -EIO; - - return 0; } + + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, true); + + return 0; } -- cgit From ce316fa55ef0f1751276b846a54fb3b835bd5e64 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 26 Nov 2019 22:12:31 +0800 Subject: drm/amdgpu: add concurrent baco reset support for XGMI Currently each XGMI node reset wq does not run in parallel if bound to the same cpu. Make a change to bind the xgmi_reset_work item to different cpus. XGMI requires all nodes to enter baco within very close proximity before any node exits baco. So schedule the xgmi_reset_work wq twice for enter/exit baco respectively. To use baco for XGMI, PMFW support for baco on XGMI is required. The case where PSP reset and baco reset coexist within an XGMI hive never exists and is not considered. v2: define use_baco flag to simplify the code for xgmi baco sequence Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 82 +++++++++++++++++++++++++----- 2 files changed, 72 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2e5b2f467911..6003f9454354 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -992,6 +992,8 @@ struct amdgpu_device { bool pm_sysfs_en; bool ucode_sysfs_en; + + bool in_baco; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fc53faac4147..114f5bca581a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2661,7 +2661,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) struct amdgpu_device *adev = container_of(__work, struct amdgpu_device, xgmi_reset_work); - adev->asic_reset_res = amdgpu_asic_reset(adev); + if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) + adev->asic_reset_res = (adev->in_baco == false) ? + amdgpu_device_baco_enter(adev->ddev) : + amdgpu_device_baco_exit(adev->ddev); + else + adev->asic_reset_res = amdgpu_asic_reset(adev); + if (adev->asic_reset_res) DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", adev->asic_reset_res, adev->ddev->unique); @@ -3787,13 +3793,18 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, return r; } -static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, +static int amdgpu_do_asic_reset(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive, struct list_head *device_list_handle, bool *need_full_reset_arg) { struct amdgpu_device *tmp_adev = NULL; bool need_full_reset = *need_full_reset_arg, vram_lost = false; int r = 0; + int cpu = smp_processor_id(); + bool use_baco = + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
+ true : false; /* * ASIC reset has to be done on all HGMI hive nodes ASAP @@ -3801,21 +3812,24 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, */ if (need_full_reset) { list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - /* For XGMI run all resets in parallel to speed up the process */ + /* + * For XGMI run all resets in parallel to speed up the + * process by scheduling the highpri wq on different + * cpus. For XGMI with baco reset, all nodes must enter + * baco within close proximity before anyone exit. + */ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work)) + if (!queue_work_on(cpu, system_highpri_wq, + &tmp_adev->xgmi_reset_work)) r = -EALREADY; + cpu = cpumask_next(cpu, cpu_online_mask); } else r = amdgpu_asic_reset(tmp_adev); - - if (r) { - DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", - r, tmp_adev->ddev->unique); + if (r) break; - } } - /* For XGMI wait for all PSP resets to complete before proceed */ + /* For XGMI wait for all work to complete before proceed */ if (!r) { list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { @@ -3824,11 +3838,54 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, r = tmp_adev->asic_reset_res; if (r) break; + if (use_baco) + tmp_adev->in_baco = true; } } } - } + /* + * For XGMI with baco reset, need exit baco phase by scheduling + * xgmi_reset_work one more time. PSP reset and sGPU skips this + * phase. Not assume the situation that PSP reset and baco reset + * coexist within an XGMI hive. + */ + + if (!r && use_baco) { + cpu = smp_processor_id(); + list_for_each_entry(tmp_adev, device_list_handle, + gmc.xgmi.head) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + if (!queue_work_on(cpu, + system_highpri_wq, + &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + if (r) + break; + cpu = cpumask_next(cpu, cpu_online_mask); + } + } + } + + if (!r && use_baco) { + list_for_each_entry(tmp_adev, device_list_handle, + gmc.xgmi.head) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + tmp_adev->in_baco = false; + } + } + } + + if (r) { + DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", + r, tmp_adev->ddev->unique); + goto end; + } + } list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { if (need_full_reset) { @@ -4113,7 +4170,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (r) adev->asic_reset_res = r; } else { - r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); + r = amdgpu_do_asic_reset(adev, hive, device_list_handle, + &need_full_reset); if (r && r == -EAGAIN) goto retry; } -- cgit From b823821f2244add19a71e7fe6c8f8550a29d672d Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 27 Nov 2019 13:17:17 +0800 Subject: drm/amdgpu: support full gpu reset workflow when ras err_event_athub occurs This athub fatal error can be recovered by baco without system-level reboot, so add a mode to use baco for the recovery. Not affect the default psp reset situations for now. 
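In short, the recovery path now distinguishes a RAS fatal interrupt that BACO can recover from the legacy "stop jobs and reboot" behaviour. A condensed sketch of the new gating (simplified; the real code in amdgpu_device_gpu_recover() applies these checks while iterating over all rings and hive nodes):

        bool in_ras_intr = amdgpu_ras_intr_triggered();
        bool use_baco =
                amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO;

        if (in_ras_intr && !use_baco) {
                /* Legacy RAS path: BACO cannot recover this error, so jobs
                 * are stopped (and the machine may reboot) instead of
                 * resuming the schedulers.
                 */
        } else {
                /* err_event_athub with BACO available: run the normal full
                 * GPU reset flow and resume the schedulers afterwards.
                 */
        }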
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 114f5bca581a..2957ebf9b97c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4018,12 +4018,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_device *tmp_adev = NULL; int i, r = 0; bool in_ras_intr = amdgpu_ras_intr_triggered(); + bool use_baco = + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ? + true : false; /* * Flush RAM to disk so that after reboot * the user can read log and see why the system rebooted. */ - if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) { + if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) { DRM_WARN("Emergency reboot."); @@ -4034,7 +4037,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, need_full_reset = job_signaled = false; INIT_LIST_HEAD(&device_list); - dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset"); + dev_info(adev->dev, "GPU %s begin!\n", + (in_ras_intr && !use_baco) ? "jobs stop":"reset"); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -4101,7 +4105,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, amdgpu_unregister_gpu_instance(tmp_adev); /* disable ras on ALL IPs */ - if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev)) + if (!(in_ras_intr && !use_baco) && + amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -4112,13 +4117,13 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, drm_sched_stop(&ring->sched, job ? &job->base : NULL); - if (in_ras_intr) + if (in_ras_intr && !use_baco) amdgpu_job_stop_all_jobs_on_sched(&ring->sched); } } - if (in_ras_intr) + if (in_ras_intr && !use_baco) goto skip_sched_resume; /* @@ -4212,7 +4217,7 @@ skip_hw_reset: skip_sched_resume: list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { /*unlock kfd: SRIOV would do it separately */ - if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev)) + if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev)) amdgpu_amdkfd_post_reset(tmp_adev); amdgpu_device_unlock_adev(tmp_adev); } -- cgit From 00eaa57172a02edddbf445112409e807e0caacd9 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 25 Oct 2019 17:19:38 +0800 Subject: drm/amdgpu: clear err_event_athub flag after reset exit Otherwise next err_event_athub error cannot call gpu reset. And following resume sequence will not be affected by this flag. 
v2: create a function to clear amdgpu_ras_in_intr for modularity of the ras driver Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 5 +++++ 2 files changed, 8 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2957ebf9b97c..5a8506182ade 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3887,6 +3887,9 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev, } } + if (!r && amdgpu_ras_intr_triggered()) + amdgpu_ras_intr_cleared(); + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { if (need_full_reset) { /* post card */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index a2c1ac1b9572..d4ade4739245 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -621,6 +621,11 @@ static inline bool amdgpu_ras_intr_triggered(void) return !!atomic_read(&amdgpu_ras_in_intr); } +static inline void amdgpu_ras_intr_cleared(void) +{ + atomic_set(&amdgpu_ras_in_intr, 0); +} + void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); #endif -- cgit From 76434f75d4a48525d825ec8ee0c018db4cc81f77 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 27 Nov 2019 16:51:22 +0800 Subject: drm/amdgpu: reduce redundant uvd context lost warning message Move the print out of the uvd instance loop in amdgpu_uvd_suspend v2: drop unnecessary brackets v3: grab ras_intr state once for multiple uses Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e324bfe6c58f..d587ffe2af8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -349,6 +349,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) unsigned size; void *ptr; int i, j; + bool in_ras_intr = amdgpu_ras_intr_triggered(); cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -376,13 +377,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) return -ENOMEM; /* re-write 0 since err_event_athub will corrupt VCPU buffer */ - if (amdgpu_ras_intr_triggered()) { - DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); + if (in_ras_intr) memset(adev->uvd.inst[j].saved_bo, 0, size); - } else { + else memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); - } } + + if (in_ras_intr) + DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); + return 0; } -- cgit From d3511fd01cef7da850a93cd51e965ad6450dd95e Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 5 Nov 2019 11:59:38 -0500 Subject: drm/amd/display: update sr and pstate latencies for Renoir [Why] DF team has produced more optimized latency numbers. [How] Add sr latencies to the wm table, use different latencies for different wm sets. Also fix bb override from registry key for these latencies.
Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 16 ++++++++++++---- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 15 ++++++++++++--- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2 ++ 3 files changed, 26 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 790a2d211bd6..841095d09d3c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -523,25 +523,33 @@ struct clk_bw_params rn_bw_params = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 6.09, + .sr_enter_plus_exit_time_us = 7.14, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, .valid = true, }, }, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index dd3bc37d4eb9..818c7a629484 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -972,6 +972,8 @@ static void calculate_wm_set_for_vlevel( pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; @@ -987,14 +989,21 @@ static void calculate_wm_set_for_vlevel( static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { + int i; + kernel_fpu_begin(); if (dc->bb_overrides.sr_exit_time_ns) { - bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = + dc->bb_overrides.sr_exit_time_ns / 1000.0; + } } if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { - bb->sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } } if (dc->bb_overrides.urgent_latency_ns) { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index f55203e427de..4aa09fe954c5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -66,6 +66,8 @@ struct wm_range_table_entry { unsigned int wm_inst; unsigned int wm_type; double pstate_latency_us; + double sr_exit_time_us; + double 
sr_enter_plus_exit_time_us; bool valid; }; -- cgit From 2b77dcc5e5aa382832af93a581de8293db72d888 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 5 Nov 2019 13:04:34 -0500 Subject: drm/amd/display: rename core_dc to dc [Why] First, to make code more consistent Second, to get rid of those scenario where we create a second local pointer to dc when it's already passed in. [How] Rename core_dc to dc Remove duplicate local pointers to dc Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | 12 ++-- .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 6 +- .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 6 +- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 7 +-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 65 +++++++++++----------- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 26 ++++----- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 +- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 40 ++++++------- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 22 ++++---- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 8 +-- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 10 ++-- .../amd/display/dc/irq/dce110/irq_service_dce110.c | 4 +- 12 files changed, 102 insertions(+), 107 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index a6c46e903ff9..d031bd3d3072 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct bp_set_dce_clock_parameters dce_clk_params; struct dc_bios *bp = clk_mgr_base->ctx->dc_bios; - struct dc *core_dc = clk_mgr_base->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr_base->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; int actual_clock = requested_clk_khz; /* Prepare to program display clock*/ memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) bp->funcs->set_dce_clock(bp, &dce_clk_params); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) dmcu->funcs->set_psr_wait_loop(dmcu, @@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) { struct bp_set_dce_clock_parameters dce_clk_params; struct dc_bios *bp = clk_mgr->base.ctx->dc_bios; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; int actual_clock = requested_clk_khz; /* Prepare to program display clock*/ memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_clock) 
dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 1897e91c8ccb..97b7f32294fd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) { int actual_dispclk_set_mhz = -1; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param( @@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di /* Actual dispclk set is returned in the parameter register */ actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index cb7c0e8b7e1b..6878aedf1d3e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -82,8 +82,8 @@ int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr) int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) { int actual_dispclk_set_mhz = -1; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param( @@ -91,7 +91,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis VBIOSSMC_MSG_SetDispclkFreq, requested_dispclk_khz / 1000); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 85a52a16295a..bf13cffed703 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -310,14 +310,13 @@ void context_timing_trace( struct resource_context *res_ctx) { int i; - struct dc *core_dc = dc; int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; struct crtc_position position; - unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; + unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; DC_LOGGER_INIT(dc->ctx->logger); - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; /* get_position() returns CRTC vertical/horizontal counter * hence not applicable for underlay pipe @@ -329,7 +328,7 @@ void context_timing_trace( 
h_pos[i] = position.horizontal_count; v_pos[i] = position.vertical_count; } - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 123b79dcd8e4..093f6c808876 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2355,9 +2355,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { - struct dc *core_dc = link->ctx->dc; - struct abm *abm = core_dc->res_pool->abm; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct abm *abm = dc->res_pool->abm; + struct dmcu *dmcu = dc->res_pool->dmcu; unsigned int controller_id = 0; bool use_smooth_brightness = true; int i; @@ -2375,22 +2375,22 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (dc_is_embedded_signal(link->connector_signal)) { for (i = 0; i < MAX_PIPES; i++) { - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { - if (core_dc->current_state->res_ctx. + if (dc->current_state->res_ctx.pipe_ctx[i].stream) { + if (dc->current_state->res_ctx. pipe_ctx[i].stream->link == link) { /* DMCU -1 for all controller id values, * therefore +1 here */ controller_id = - core_dc->current_state-> + dc->current_state-> res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; /* Disable brightness ramping when the display is blanked * as it can hang the DMCU */ - if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) + if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) frame_ramp = 0; } } @@ -2408,8 +2408,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link, bool dc_link_set_abm_disable(const struct dc_link *link) { - struct dc *core_dc = link->ctx->dc; - struct abm *abm = core_dc->res_pool->abm; + struct dc *dc = link->ctx->dc; + struct abm *abm = dc->res_pool->abm; if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL)) return false; @@ -2421,8 +2421,8 @@ bool dc_link_set_abm_disable(const struct dc_link *link) bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; @@ -2436,8 +2436,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; if (dmcu != NULL && link->psr_feature_enabled) dmcu->funcs->get_psr_state(dmcu, psr_state); @@ -2484,7 +2484,7 @@ bool dc_link_setup_psr(struct dc_link *link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context) { - struct dc *core_dc; + struct dc *dc; struct dmcu *dmcu; int i; /* updateSinkPsrDpcdConfig*/ @@ -2495,8 +2495,8 @@ bool dc_link_setup_psr(struct dc_link *link, if (!link) return false; - core_dc = link->ctx->dc; - dmcu = core_dc->res_pool->dmcu; + dc = link->ctx->dc; + dmcu = dc->res_pool->dmcu; if (!dmcu) return false; @@ -2535,13 +2535,13 @@ bool dc_link_setup_psr(struct dc_link *link, psr_context->engineId = 
link->link_enc->preferred_engine; for (i = 0; i < MAX_PIPES; i++) { - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream + if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { /* dmcu -1 for all controller id values, * therefore +1 here */ psr_context->controllerId = - core_dc->current_state->res_ctx. + dc->current_state->res_ctx. pipe_ctx[i].stream_res.tg->inst + 1; break; } @@ -2905,12 +2905,12 @@ void core_link_enable_stream( struct dc_state *state, struct pipe_ctx *pipe_ctx) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) return; @@ -2953,14 +2953,14 @@ void core_link_enable_stream( pipe_ctx->stream_res.stream_enc, &stream->timing); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { bool apply_edp_fast_boot_optimization = pipe_ctx->stream->apply_edp_fast_boot_optimization; pipe_ctx->stream->apply_edp_fast_boot_optimization = false; resource_build_info_frame(pipe_ctx); - core_dc->hwss.update_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); /* Do not touch link on seamless boot optimization. */ if (pipe_ctx->stream->apply_seamless_boot_optimization) { @@ -3003,7 +3003,7 @@ void core_link_enable_stream( } } - core_dc->hwss.enable_audio_stream(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); /* turn off otg test pattern if enable */ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) @@ -3016,7 +3016,7 @@ void core_link_enable_stream( dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); } - core_dc->hwss.enable_stream(pipe_ctx); + dc->hwss.enable_stream(pipe_ctx); /* Set DPS PPS SDP (AKA "info frames") */ if (pipe_ctx->stream->timing.flags.DSC) { @@ -3028,7 +3028,7 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); - core_dc->hwss.unblank_stream(pipe_ctx, + dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); if (dc_is_dp_signal(pipe_ctx->stream->signal)) @@ -3036,8 +3036,7 @@ void core_link_enable_stream( #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, false); #endif - } - else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) + } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); @@ -3047,11 +3046,11 @@ void core_link_enable_stream( void core_link_disable_stream(struct pipe_ctx *pipe_ctx) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) return; @@ -3059,7 +3058,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) update_psp_stream_config(pipe_ctx, true); #endif - core_dc->hwss.blank_stream(pipe_ctx); + dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); @@ -3088,7 +3087,7 @@ 
void core_link_disable_stream(struct pipe_ctx *pipe_ctx) write_i2c_redriver_setting(pipe_ctx, false); } } - core_dc->hwss.disable_stream(pipe_ctx); + dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); if (pipe_ctx->stream->timing.flags.DSC) { @@ -3099,12 +3098,12 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) return; - core_dc->hwss.set_avmute(pipe_ctx, enable); + dc->hwss.set_avmute(pipe_ctx, enable); } /** diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index bb1e8e5b5252..67ce12df23f1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -95,8 +95,8 @@ void dp_enable_link_phy( const struct dc_link_settings *link_settings) { struct link_encoder *link_enc = link->link_enc; - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; @@ -200,8 +200,8 @@ bool edp_receiver_ready_T7(struct dc_link *link) void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; if (!link->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(link, false); @@ -395,14 +395,14 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc, static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; bool result = false; - if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) result = true; else - result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable); + result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); return result; } @@ -412,7 +412,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -448,7 +448,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -473,7 +473,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) OPTC_DSC_DISABLED, 0, 0); /* disable DSC in stream encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0); @@ -516,7 +516,7 @@ out: bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; if (!pipe_ctx->stream->timing.flags.DSC || !dsc) @@ -535,7 +535,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) DC_LOG_DSC(" "); dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, @@ -544,7 +544,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) } } else { /* disable DSC PPS in stream encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, false, NULL); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 89b5f86cd40b..a9412720c860 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2747,9 +2747,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) { - struct dc *core_dc = dc; struct dc_link *link = stream->link; - struct timing_generator *tg = core_dc->res_pool->timing_generators[0]; + struct timing_generator *tg = dc->res_pool->timing_generators[0]; enum dc_status res = DC_OK; calculate_phy_pix_clks(stream); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index d9afd834c146..70b7c1eb8a8f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -271,7 +271,7 @@ bool dc_stream_set_cursor_attributes( const struct dc_cursor_attributes *attributes) { int i; - struct dc *core_dc; + struct dc *dc; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; @@ -289,8 +289,8 @@ bool 
dc_stream_set_cursor_attributes( return false; } - core_dc = stream->ctx->dc; - res_ctx = &core_dc->current_state->res_ctx; + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; stream->cursor_attributes = *attributes; for (i = 0; i < MAX_PIPES; i++) { @@ -302,17 +302,17 @@ bool dc_stream_set_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - delay_cursor_until_vupdate(pipe_ctx, core_dc); - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + delay_cursor_until_vupdate(pipe_ctx, dc); + dc->hwss.pipe_control_lock(dc, pipe_to_program, true); } - core_dc->hwss.set_cursor_attribute(pipe_ctx); - if (core_dc->hwss.set_cursor_sdr_white_level) - core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); } if (pipe_to_program) - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + dc->hwss.pipe_control_lock(dc, pipe_to_program, false); return true; } @@ -322,7 +322,7 @@ bool dc_stream_set_cursor_position( const struct dc_cursor_position *position) { int i; - struct dc *core_dc; + struct dc *dc; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; @@ -336,8 +336,8 @@ bool dc_stream_set_cursor_position( return false; } - core_dc = stream->ctx->dc; - res_ctx = &core_dc->current_state->res_ctx; + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; stream->cursor_position = *position; for (i = 0; i < MAX_PIPES; i++) { @@ -353,15 +353,15 @@ bool dc_stream_set_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - delay_cursor_until_vupdate(pipe_ctx, core_dc); - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + delay_cursor_until_vupdate(pipe_ctx, dc); + dc->hwss.pipe_control_lock(dc, pipe_to_program, true); } - core_dc->hwss.set_cursor_position(pipe_ctx); + dc->hwss.set_cursor_position(pipe_ctx); } if (pipe_to_program) - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + dc->hwss.pipe_control_lock(dc, pipe_to_program, false); return true; } @@ -482,9 +482,9 @@ bool dc_stream_remove_writeback(struct dc *dc, uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) { uint8_t i; - struct dc *core_dc = stream->ctx->dc; + struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = - &core_dc->current_state->res_ctx; + &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -541,9 +541,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, { uint8_t i; bool ret = false; - struct dc *core_dc = stream->ctx->dc; + struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = - &core_dc->current_state->res_ctx; + &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index e60aff46d510..ea1229a3e2b2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -108,16 +108,14 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, struct dc_plane_state *dc_create_plane_state(struct dc *dc) { - struct dc *core_dc = dc; - struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), - GFP_KERNEL); + GFP_KERNEL); if (NULL == plane_state) return NULL; 
kref_init(&plane_state->refcount); - dc_plane_construct(core_dc->ctx, plane_state); + dc_plane_construct(dc->ctx, plane_state); return plane_state; } @@ -137,7 +135,7 @@ const struct dc_plane_status *dc_plane_get_status( const struct dc_plane_state *plane_state) { const struct dc_plane_status *plane_status; - struct dc *core_dc; + struct dc *dc; int i; if (!plane_state || @@ -148,15 +146,15 @@ const struct dc_plane_status *dc_plane_get_status( } plane_status = &plane_state->status; - core_dc = plane_state->ctx->dc; + dc = plane_state->ctx->dc; - if (core_dc->current_state == NULL) + if (dc->current_state == NULL) return NULL; /* Find the current plane state and set its pending bit to false */ - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = - &core_dc->current_state->res_ctx.pipe_ctx[i]; + &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; @@ -166,14 +164,14 @@ const struct dc_plane_status *dc_plane_get_status( break; } - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = - &core_dc->current_state->res_ctx.pipe_ctx[i]; + &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; - core_dc->hwss.update_pending_status(pipe_ctx); + dc->hwss.update_pending_status(pipe_ctx); } return plane_status; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 1dc065f1125c..2b2ee6893e25 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -945,15 +945,15 @@ void dce110_edp_backlight_control( void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ - struct dc *core_dc; + struct dc *dc; struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; if (!pipe_ctx->stream) return; - core_dc = pipe_ctx->stream->ctx->dc; - clk_mgr = core_dc->clk_mgr; + dc = pipe_ctx->stream->ctx->dc; + clk_mgr = dc->clk_mgr; if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; @@ -961,7 +961,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio) { for (i = 0; i < MAX_PIPES; i++) { /*current_state not updated yet*/ - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL) + if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL) num_audio++; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 251bb59c271a..bd6cdb6b38f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1655,10 +1655,10 @@ void dcn10_enable_per_frame_crtc_position_reset( } /*static void print_rq_dlg_ttu( - struct dc *core_dc, + struct dc *dc, struct pipe_ctx *pipe_ctx) { - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML TTU Output parameters [%d] ==============\n" "qos_level_low_wm: %d, \n" "qos_level_high_wm: %d, \n" @@ -1688,7 +1688,7 @@ void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML DLG Output 
parameters [%d] ==============\n" "refcyc_h_blank_end: %d, \n" "dlg_vblank_end: %d, \n" @@ -1723,7 +1723,7 @@ void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\ndst_y_per_meta_row_nom_l: %d, \n" "refcyc_per_meta_chunk_nom_l: %d, \n" "refcyc_per_line_delivery_pre_l: %d, \n" @@ -1753,7 +1753,7 @@ void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->dlg_regs.refcyc_per_line_delivery_c ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML RQ Output parameters [%d] ==============\n" "chunk_size: %d \n" "min_chunk_size: %d \n" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 3d4461a70f7d..378cc11aa047 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -204,7 +204,7 @@ bool dce110_vblank_set(struct irq_service *irq_service, bool enable) { struct dc_context *dc_ctx = irq_service->ctx; - struct dc *core_dc = irq_service->ctx->dc; + struct dc *dc = irq_service->ctx->dc; enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(irq_service->ctx->dc, info->src_id, @@ -212,7 +212,7 @@ bool dce110_vblank_set(struct irq_service *irq_service, uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK; struct timing_generator *tg = - core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; + dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; if (enable) { if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) { -- cgit From f42ea55be1114768e36c4e8dff4400ca811309cf Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 5 Nov 2019 13:17:30 -0500 Subject: drm/amd/display: add separate of private hwss functions [Why] Some function pointers in the hwss function pointer table are meant to be hw sequencer entry points to be called from dc. However some of those function pointers are not meant to be entry points, but instead used as a code reuse/inheritance tool called directly by other hwss functions, not by dc. Therefore, we want a more clear separation of which functions we determine to be interface functions vs the functions we use within hwss. 
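A minimal sketch of the separation this patch introduces (illustrative only: toy_dc, toy_hwseq, sketch_hw_sequencer_construct and the trimmed member lists are stand-ins invented for this log, not the driver's real struct dc or struct dce_hwseq; the full tables are defined in hw_sequencer.h and the new hw_sequencer_private.h in the hunks that follow):

struct dc;
struct dc_state;
struct dc_link;

/* Entry points DC core may call, reached as dc->hwss.* */
struct hw_sequencer_funcs {
	void (*init_hw)(struct dc *dc);
	void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context);
};

/* Reuse/inheritance helpers, called only from other hwseq code as hws->funcs.* */
struct hwseq_private_funcs {
	void (*init_pipes)(struct dc *dc, struct dc_state *context);
	void (*edp_backlight_control)(struct dc_link *link, bool enable);
};

/* Toy containers showing where each table is attached. */
struct toy_hwseq {
	struct hwseq_private_funcs funcs;
};

struct toy_dc {
	struct hw_sequencer_funcs hwss;
	struct toy_hwseq *hwseq;
};

static const struct hw_sequencer_funcs dcn10_funcs_sketch = {
	0 /* .init_hw = dcn10_init_hw, ... in the real dcn10_init.c */
};

static const struct hwseq_private_funcs dcn10_private_funcs_sketch = {
	0 /* .init_pipes = dcn10_init_pipes, ... in the real dcn10_init.c */
};

/* Mirrors the two assignments the real *_hw_sequencer_construct() now makes. */
void sketch_hw_sequencer_construct(struct toy_dc *dc)
{
	dc->hwss = dcn10_funcs_sketch;
	dc->hwseq->funcs = dcn10_private_funcs_sketch;
}

Code inside hwseq files then resolves helpers through hws->funcs.* (for example hws->funcs.edp_backlight_control) while DC core keeps calling dc->hwss.*, which is the substitution repeated throughout the dce110/dcn10/dcn20 hunks below.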
[How] DC interface functions will be stored in: struct hw_sequencer_funcs Functions used within HWSS will be stored in: struct hwseq_private_funcs Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 6 + drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 1 - drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3 - drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 6 +- .../amd/display/dc/dce100/dce100_hw_sequencer.c | 3 +- .../amd/display/dc/dce100/dce100_hw_sequencer.h | 1 + .../amd/display/dc/dce110/dce110_hw_sequencer.c | 77 +++-- .../amd/display/dc/dce110/dce110_hw_sequencer.h | 1 + .../drm/amd/display/dc/dce110/dce110_resource.c | 3 +- .../amd/display/dc/dce112/dce112_hw_sequencer.c | 2 +- .../amd/display/dc/dce112/dce112_hw_sequencer.h | 1 + .../amd/display/dc/dce120/dce120_hw_sequencer.c | 2 +- .../amd/display/dc/dce120/dce120_hw_sequencer.h | 1 + .../drm/amd/display/dc/dce80/dce80_hw_sequencer.c | 2 +- .../drm/amd/display/dc/dce80/dce80_hw_sequencer.h | 1 + .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 124 ++++--- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 1 + drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 38 ++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 71 ++-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 3 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 54 +-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 1 + drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h | 2 + drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 63 ++-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 370 +++++---------------- .../drm/amd/display/dc/inc/hw_sequencer_private.h | 156 +++++++++ 27 files changed, 525 insertions(+), 470 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2645d20e8c4c..e384c143bb58 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2004,6 +2004,12 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.update_info_frame(pipe_ctx); } + if (stream_update->hdr_static_metadata && + stream->use_dynamic_meta && + dc->hwss.set_dmdata_attributes && + pipe_ctx->stream->dmdata_address.quad_part != 0) + dc->hwss.set_dmdata_attributes(pipe_ctx); + if (stream_update->gamut_remap) dc_stream_set_gamut_remap(dc, stream); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index bf13cffed703..502ed3c7959d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -33,7 +33,6 @@ #include "core_status.h" #include "core_types.h" -#include "hw_sequencer.h" #include "resource.h" diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 70b7c1eb8a8f..b43a4b115fd8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -33,9 +33,6 @@ #include "resource.h" #include "ipp.h" #include "timing_generator.h" -#if defined(CONFIG_DRM_AMD_DC_DCN) -#include "dcn10/dcn10_hw_sequencer.h" -#endif #define DC_LOGGER dc->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c index 
0275d6d60da4..e1c5839a80dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c @@ -25,7 +25,7 @@ #include "dce_hwseq.h" #include "reg_helper.h" -#include "hw_sequencer.h" +#include "hw_sequencer_private.h" #include "core_types.h" #define CTX \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index bff03a68aa01..c5aa1f48593a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -25,7 +25,7 @@ #ifndef __DCE_HWSEQ_H__ #define __DCE_HWSEQ_H__ -#include "hw_sequencer.h" +#include "dc_types.h" #define BL_REG_LIST()\ SR(LVTMA_PWRSEQ_CNTL), \ @@ -811,6 +811,10 @@ enum blnd_mode { BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */ }; +struct dce_hwseq; +struct pipe_ctx; +struct clock_source; + void dce_enable_fe_clock(struct dce_hwseq *hwss, unsigned int inst, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c index 799d36299c9b..753cb8edd996 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c @@ -26,7 +26,6 @@ #include "dc.h" #include "core_types.h" #include "clk_mgr.h" -#include "hw_sequencer.h" #include "dce100_hw_sequencer.h" #include "resource.h" @@ -136,7 +135,7 @@ void dce100_hw_sequencer_construct(struct dc *dc) { dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index a6b80fdaa666..34518da20009 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE100_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; struct dc_state; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 2b2ee6893e25..4939cf3b316f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -653,10 +653,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = pipe_ctx->stream->link->cur_link_settings.lane_count; - struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; struct dc_link *link = pipe_ctx->stream->link; - + const struct dc *dc = link->dc; uint32_t active_total_with_borders; uint32_t early_control = 0; @@ -669,7 +668,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - link->dc->hwss.update_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); /* enable early control to avoid corruption on DP monitor*/ active_total_with_borders = @@ -1049,6 +1048,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; 
/* only 3 items below are used by unblank */ params.timing = pipe_ctx->stream->timing; @@ -1058,7 +1058,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } @@ -1066,9 +1066,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, false); + hws->funcs.edp_backlight_control(link, false); dc_link_set_abm_disable(link); } @@ -1325,9 +1326,10 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct drr_params params = {0}; unsigned int event_triggers = 0; struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; + struct dce_hwseq *hws = dc->hwseq; - if (dc->hwss.disable_stream_gating) { - dc->hwss.disable_stream_gating(dc, pipe_ctx); + if (hws->funcs.disable_stream_gating) { + hws->funcs.disable_stream_gating(dc, pipe_ctx); } if (pipe_ctx->stream_res.audio != NULL) { @@ -1357,10 +1359,10 @@ static enum dc_status apply_single_controller_ctx_to_hw( /* */ /* Do not touch stream timing on seamless boot optimization. */ if (!pipe_ctx->stream->apply_seamless_boot_optimization) - dc->hwss.enable_stream_timing(pipe_ctx, context, dc); + hws->funcs.enable_stream_timing(pipe_ctx, context, dc); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -1553,9 +1555,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; + struct dce_hwseq *hws = dc->hwseq; - if (dc->hwss.init_pipes) - dc->hwss.init_pipes(dc, context); + if (hws->funcs.init_pipes) + hws->funcs.init_pipes(dc, context); edp_stream = get_edp_stream(context); @@ -1592,7 +1595,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ - dc->hwss.edp_backlight_control(edp_link_with_sink, false); + hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ power_down_all_hw_blocks(dc); @@ -2007,13 +2010,14 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; /* Reset old context */ /* look up the targets that have been removed since last commit */ - dc->hwss.reset_hw_ctx_wrap(dc, context); + hws->funcs.reset_hw_ctx_wrap(dc, context); /* Skip applying if no targets */ if (context->stream_count <= 0) @@ -2038,7 +2042,7 @@ enum dc_status dce110_apply_ctx_to_hw( continue; } - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, dc->ctx->dc_bios, PIPE_GATING_CONTROL_DISABLE); } @@ -2347,19 +2351,20 @@ static void init_hw(struct dc 
*dc) struct transform *xfm; struct abm *abm; struct dmcu *dmcu; + struct dce_hwseq *hws = dc->hwseq; bp = dc->ctx->dc_bios; for (i = 0; i < dc->res_pool->pipe_count; i++) { xfm = dc->res_pool->transforms[i]; xfm->funcs->transform_reset(xfm); - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, bp, PIPE_GATING_CONTROL_INIT); - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, bp, PIPE_GATING_CONTROL_DISABLE); - dc->hwss.enable_display_pipe_clock_gating( + hws->funcs.enable_display_pipe_clock_gating( dc->ctx, true); } @@ -2445,6 +2450,8 @@ static void dce110_program_front_end_for_pipe( struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; unsigned int i; + struct dce_hwseq *hws = dc->hwseq; + DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); @@ -2503,10 +2510,10 @@ static void dce110_program_front_end_for_pipe( if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); DC_LOG_SURFACE( "Pipe:%d %p: addr hi:0x%x, " @@ -2609,6 +2616,7 @@ static void dce110_apply_ctx_for_surface( static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; int fe_idx = pipe_ctx->plane_res.mi ? pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx; @@ -2616,7 +2624,7 @@ static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream) return; - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE); dc->res_pool->transforms[fe_idx]->funcs->transform_reset( @@ -2705,14 +2713,10 @@ static const struct hw_sequencer_funcs dce110_funcs = { .program_gamut_remap = program_gamut_remap, .program_output_csc = program_output_csc, .init_hw = init_hw, - .init_pipes = init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, .update_plane_addr = update_plane_addr, .update_pending_status = dce110_update_pending_status, - .set_input_transfer_func = dce110_set_input_transfer_func, - .set_output_transfer_func = dce110_set_output_transfer_func, - .power_down = dce110_power_down, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dce110_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset, @@ -2723,8 +2727,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, - .enable_display_power_gating = dce110_enable_display_power_gating, .disable_plane = dce110_power_down_fe, .pipe_control_lock = dce_pipe_control_lock, .prepare_bandwidth = dce110_prepare_bandwidth, @@ -2732,22 +2734,33 @@ static const struct hw_sequencer_funcs dce110_funcs = { .set_drr = set_drr, .get_position = get_position, 
.set_static_screen_control = set_static_screen_control, - .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, - .enable_stream_timing = dce110_enable_stream_timing, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, .setup_stereo = NULL, .set_avmute = dce110_set_avmute, .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position = dce110_set_cursor_position, .set_cursor_attribute = dce110_set_cursor_attribute }; +static const struct hwseq_private_funcs dce110_private_funcs = { + .init_pipes = init_pipes, + .update_plane_addr = update_plane_addr, + .set_input_transfer_func = dce110_set_input_transfer_func, + .set_output_transfer_func = dce110_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, + .enable_display_power_gating = dce110_enable_display_power_gating, + .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, + .enable_stream_timing = dce110_enable_stream_timing, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .edp_backlight_control = dce110_edp_backlight_control, +}; + void dce110_hw_sequencer_construct(struct dc *dc) { dc->hwss = dce110_funcs; + dc->hwseq->funcs = dce110_private_funcs; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index c639e1680b7b..26a9c14a58b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE110_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; struct dc_state; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index a535e2cda694..bf14e9ab040c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -1097,6 +1097,7 @@ static struct pipe_ctx *dce110_acquire_underlay( struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; + struct dce_hwseq *hws = dc->hwseq; struct resource_context *res_ctx = &context->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; @@ -1117,7 +1118,7 @@ static struct pipe_ctx *dce110_acquire_underlay( struct tg_color black_color = {0}; struct dc_bios *dcb = dc->ctx->dc_bios; - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, pipe_ctx->stream_res.tg->inst, dcb, PIPE_GATING_CONTROL_DISABLE); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 1e4a7c13f0ed..19873ee1f78d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -158,6 +158,6 @@ void dce112_hw_sequencer_construct(struct dc *dc) * structure */ dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h index e646f4a37fa2..943f1b2c5b2f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE112_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index 1ca30928025e..66a13aa39c95 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -265,7 +265,7 @@ void dce120_hw_sequencer_construct(struct dc *dc) * structure */ dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating; dc->hwss.update_dchub = dce120_update_dchub; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h index c51afbd0b012..bc024534732f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE120_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index c4543178ba20..893261c81854 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -74,7 +74,7 @@ void dce80_hw_sequencer_construct(struct dc *dc) { dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h index 7a1b31def66f..e43af832d00c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE80_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index bd6cdb6b38f6..2b3081ee0e07 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -642,8 +642,8 @@ static void power_on_plane( if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true); - hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true); + hws->funcs.dpp_pg_control(hws, plane_id, true); + hws->funcs.hubp_pg_control(hws, plane_id, true); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); DC_LOG_DEBUG( @@ -664,7 +664,7 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.hubp_pg_control(hws, 0, false); + hws->funcs.hubp_pg_control(hws, 0, false); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -693,7 +693,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - 
dc->hwss.hubp_pg_control(hws, 0, true); + hws->funcs.hubp_pg_control(hws, 0, true); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -703,12 +703,14 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc) void dcn10_bios_golden_init(struct dc *dc) { + struct dce_hwseq *hws = dc->hwseq; struct dc_bios *bp = dc->ctx->dc_bios; int i; bool allow_self_fresh_force_enable = true; - if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) + if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc)) return; + if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) allow_self_fresh_force_enable = dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); @@ -1015,6 +1017,7 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) /* trigger HW to start disconnect plane from stream on the next vsync */ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; int dpp_id = pipe_ctx->plane_res.dpp->inst; struct mpc *mpc = dc->res_pool->mpc; @@ -1039,7 +1042,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_disconnect(hubp); if (dc->debug.sanity_checks) - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } void dcn10_plane_atomic_power_down(struct dc *dc, @@ -1052,8 +1055,8 @@ void dcn10_plane_atomic_power_down(struct dc *dc, if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.dpp_pg_control(hws, dpp->inst, false); - dc->hwss.hubp_pg_control(hws, hubp->inst, false); + hws->funcs.dpp_pg_control(hws, dpp->inst, false); + hws->funcs.hubp_pg_control(hws, hubp->inst, false); dpp->funcs->dpp_reset(dpp); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -1067,6 +1070,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc, */ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; int opp_id = hubp->opp_id; @@ -1085,7 +1089,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->power_gated = true; dc->optimized_required = false; /* We're powering off, no need to optimize */ - dc->hwss.plane_atomic_power_down(dc, + hws->funcs.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, pipe_ctx->plane_res.hubp); @@ -1099,12 +1103,13 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) return; - dc->hwss.plane_atomic_disable(dc, pipe_ctx); + hws->funcs.plane_atomic_disable(dc, pipe_ctx); apply_DEGVIDCN10_253_wa(dc); @@ -1115,6 +1120,7 @@ void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) void dcn10_init_pipes(struct dc *dc, struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; bool can_apply_seamless_boot = false; for (i = 0; i < context->stream_count; i++) { @@ -1139,8 +1145,8 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) * command table. 
*/ if (tg->funcs->is_tg_enabled(tg)) { - if (dc->hwss.init_blank != NULL) { - dc->hwss.init_blank(dc, tg); + if (hws->funcs.init_blank != NULL) { + hws->funcs.init_blank(dc, tg); tg->funcs->lock(tg); } else { tg->funcs->lock(tg); @@ -1197,7 +1203,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - dc->hwss.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); @@ -1243,15 +1249,15 @@ void dcn10_init_hw(struct dc *dc) } //Enable ability to power gate / don't force power on permanently - dc->hwss.enable_power_gating_plane(hws, true); + hws->funcs.enable_power_gating_plane(hws, true); return; } if (!dcb->funcs->is_accelerated_mode(dcb)) - dc->hwss.disable_vga(dc->hwseq); + hws->funcs.disable_vga(dc->hwseq); - dc->hwss.bios_golden_init(dc); + hws->funcs.bios_golden_init(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; @@ -1294,8 +1300,8 @@ void dcn10_init_hw(struct dc *dc) /* Power gate DSCs */ for (i = 0; i < res_pool->res_cap->num_dsc; i++) - if (dc->hwss.dsc_pg_control != NULL) - dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); + if (hws->funcs.dsc_pg_control != NULL) + hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1304,7 +1310,7 @@ void dcn10_init_hw(struct dc *dc) * everything down. */ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { - dc->hwss.init_pipes(dc, dc->current_state); + hws->funcs.init_pipes(dc, dc->current_state); } for (i = 0; i < res_pool->audio_count; i++) { @@ -1336,7 +1342,7 @@ void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - dc->hwss.enable_power_gating_plane(dc->hwseq, true); + hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); @@ -1348,6 +1354,7 @@ void dcn10_reset_hw_ctx_wrap( struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -1366,8 +1373,8 @@ void dcn10_reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); - if (dc->hwss.enable_stream_gating) - dc->hwss.enable_stream_gating(dc, pipe_ctx); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -1545,6 +1552,8 @@ void dcn10_pipe_control_lock( struct pipe_ctx *pipe, bool lock) { + struct dce_hwseq *hws = dc->hwseq; + /* use TG master update lock to lock everything on the TG * therefore only top pipe need to lock */ @@ -1552,7 +1561,7 @@ void dcn10_pipe_control_lock( return; if (dc->debug.sanity_checks) - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (lock) pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); @@ -1560,7 +1569,7 @@ void dcn10_pipe_control_lock( pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); if (dc->debug.sanity_checks) - dc->hwss.verify_allow_pstate_change_high(dc); + 
hws->funcs.verify_allow_pstate_change_high(dc); } static bool wait_for_reset_trigger_to_occur( @@ -1868,7 +1877,7 @@ static void dcn10_enable_plane( struct dce_hwseq *hws = dc->hwseq; if (dc->debug.sanity_checks) { - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } undo_DEGVIDCN10_253_wa(dc); @@ -1925,7 +1934,7 @@ static void dcn10_enable_plane( dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp); if (dc->debug.sanity_checks) { - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } } @@ -2102,6 +2111,7 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = {{0}}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; @@ -2111,10 +2121,10 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dc->hwss.get_hdr_visual_confirm_color( + hws->funcs.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dc->hwss.get_surface_visual_confirm_color( + hws->funcs.get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else { color_space_to_black_color( @@ -2201,6 +2211,7 @@ static void dcn10_update_dchubp_dpp( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -2259,7 +2270,7 @@ static void dcn10_update_dchubp_dpp( if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || plane_state->update_flags.bits.global_alpha_change) - dc->hwss.update_mpcc(dc, pipe_ctx); + hws->funcs.update_mpcc(dc, pipe_ctx); if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || @@ -2319,7 +2330,7 @@ static void dcn10_update_dchubp_dpp( hubp->power_gated = false; - dc->hwss.update_plane_addr(dc, pipe_ctx); + hws->funcs.update_plane_addr(dc, pipe_ctx); if (is_pipe_tree_visible(pipe_ctx)) hubp->funcs->set_blank(hubp, false); @@ -2395,17 +2406,19 @@ void dcn10_program_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; + if (pipe_ctx->plane_state->update_flags.bits.full_update) dcn10_enable_plane(dc, pipe_ctx, context); dcn10_update_dchubp_dpp(dc, pipe_ctx, context); - dc->hwss.set_hdr_multiplier(pipe_ctx); + hws->funcs.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish * only do gamma programming for full update. 
@@ -2414,7 +2427,7 @@ void dcn10_program_pipe( * doing heavy calculation and programming */ if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); } static void dcn10_program_all_pipe_in_tree( @@ -2422,6 +2435,8 @@ static void dcn10_program_all_pipe_in_tree( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; + if (pipe_ctx->top_pipe == NULL) { bool blank = !is_pipe_tree_visible(pipe_ctx); @@ -2435,14 +2450,14 @@ static void dcn10_program_all_pipe_in_tree( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); } if (pipe_ctx->plane_state != NULL) - dc->hwss.program_pipe(dc, pipe_ctx, context); + hws->funcs.program_pipe(dc, pipe_ctx, context); if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); @@ -2478,6 +2493,7 @@ void dcn10_apply_ctx_for_surface( int num_planes, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; int i; struct timing_generator *tg; uint32_t underflow_check_delay_us; @@ -2497,8 +2513,8 @@ void dcn10_apply_ctx_for_surface( underflow_check_delay_us = dc->debug.underflow_assert_delay_us; - if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) - ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); + if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) + ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); if (interdependent_update) dcn10_lock_all_pipes(dc, context, true); @@ -2508,12 +2524,12 @@ void dcn10_apply_ctx_for_surface( if (underflow_check_delay_us != 0xFFFFFFFF) udelay(underflow_check_delay_us); - if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) - ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); + if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) + ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); if (num_planes == 0) { /* OTG blank before remove all front end */ - dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true); + hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true); } /* Disconnect unused mpcc */ @@ -2539,7 +2555,7 @@ void dcn10_apply_ctx_for_surface( old_pipe_ctx->plane_state && old_pipe_ctx->stream_res.tg == tg) { - dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx); removed_pipe[i] = true; DC_LOG_DC("Reset mpcc for pipe %d\n", @@ -2551,8 +2567,8 @@ void dcn10_apply_ctx_for_surface( dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context); /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) - dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); + if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree)) + hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context); if (interdependent_update) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 
@@ -2609,10 +2625,11 @@ void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2634,17 +2651,18 @@ void dcn10_prepare_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } void dcn10_optimize_bandwidth( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2666,7 +2684,7 @@ void dcn10_optimize_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } void dcn10_set_drr(struct pipe_ctx **pipe_ctx, @@ -2808,10 +2826,11 @@ void dcn10_wait_for_mpcc_disconnect( struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; int mpcc_inst; if (dc->debug.sanity_checks) { - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } if (!pipe_ctx->stream_res.opp) @@ -2828,7 +2847,7 @@ void dcn10_wait_for_mpcc_disconnect( } if (dc->debug.sanity_checks) { - dc->hwss.verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } } @@ -3127,6 +3146,7 @@ void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; /* only 3 items below are used by unblank */ params.timing = pipe_ctx->stream->timing; @@ -3140,7 +3160,7 @@ void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 5aad3922be6c..55b8f3b2fc4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCN10_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 38923f3120ee..e7e5352ec424 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -23,25 +23,19 @@ * */ +#include "hw_sequencer_private.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10_hw_sequencer.h" static const struct hw_sequencer_funcs dcn10_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, - .init_pipes = dcn10_init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, .update_plane_addr = dcn10_update_plane_addr, - .plane_atomic_disconnect = 
dcn10_plane_atomic_disconnect, - .program_pipe = dcn10_program_pipe, .update_dchub = dcn10_update_dchub, - .update_mpcc = dcn10_update_mpcc, .update_pending_status = dcn10_update_pending_status, - .set_input_transfer_func = dcn10_set_input_transfer_func, - .set_output_transfer_func = dcn10_set_output_transfer_func, .program_output_csc = dcn10_program_output_csc, - .power_down = dce110_power_down, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dcn10_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, @@ -53,14 +47,10 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_power_gating = dcn10_dummy_display_power_gating, .disable_plane = dcn10_disable_plane, - .blank_pixel_data = dcn10_blank_pixel_data, .pipe_control_lock = dcn10_pipe_control_lock, .prepare_bandwidth = dcn10_prepare_bandwidth, .optimize_bandwidth = dcn10_optimize_bandwidth, - .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, - .enable_stream_timing = dcn10_enable_stream_timing, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, .set_static_screen_control = dcn10_set_static_screen_control, @@ -70,18 +60,34 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn10_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn10_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .program_pipe = dcn10_program_pipe, + .update_mpcc = dcn10_update_mpcc, + .set_input_transfer_func = dcn10_set_input_transfer_func, + .set_output_transfer_func = dcn10_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn10_blank_pixel_data, + .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, + .enable_stream_timing = dcn10_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = NULL, .disable_vga = dcn10_disable_vga, @@ -96,10 +102,10 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, .set_hdr_multiplier = dcn10_set_hdr_multiplier, .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, }; void dcn10_hw_sequencer_construct(struct dc *dc) { dc->hwss = 
dcn10_funcs; + dc->hwseq->funcs = dcn10_private_funcs; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index d5c18fb4436a..619af3b3029d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -272,6 +272,7 @@ void dcn20_init_blank( struct dc *dc, struct timing_generator *tg) { + struct dce_hwseq *hws = dc->hwseq; enum dc_color_space color_space; struct tg_color black_color = {0}; struct output_pixel_processor *opp = NULL; @@ -319,7 +320,7 @@ void dcn20_init_blank( otg_active_height); } - dc->hwss.wait_for_blank_complete(opp); + hws->funcs.wait_for_blank_complete(opp); } void dcn20_dsc_pg_control( @@ -552,6 +553,7 @@ void dcn20_hubp_pg_control( */ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -572,7 +574,7 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->power_gated = true; dc->optimized_required = false; /* We're powering off, no need to optimize */ - dc->hwss.plane_atomic_power_down(dc, + hws->funcs.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, pipe_ctx->plane_res.hubp); @@ -603,6 +605,7 @@ enum dc_status dcn20_enable_stream_timing( struct dc_state *context, struct dc *dc) { + struct dce_hwseq *hws = dc->hwseq; struct dc_stream_state *stream = pipe_ctx->stream; struct drr_params params = {0}; unsigned int event_triggers = 0; @@ -662,7 +665,7 @@ enum dc_status dcn20_enable_stream_timing( pipe_ctx->stream_res.opp, true); - dc->hwss.blank_pixel_data(dc, pipe_ctx, true); + hws->funcs.blank_pixel_data(dc, pipe_ctx, true); /* VTG is within DCHUB command block. 
DCFCLK is always on */ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { @@ -670,7 +673,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - dc->hwss.wait_for_blank_complete(pipe_ctx->stream_res.opp); + hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -820,6 +823,7 @@ bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { + struct dce_hwseq *hws = dc->hwseq; struct dpp *dpp_base = pipe_ctx->plane_res.dpp; const struct dc_transfer_func *tf = NULL; bool result = true; @@ -828,8 +832,8 @@ bool dcn20_set_input_transfer_func(struct dc *dc, if (dpp_base == NULL || plane_state == NULL) return false; - dc->hwss.set_shaper_3dlut(pipe_ctx, plane_state); - dc->hwss.set_blend_lut(pipe_ctx, plane_state); + hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); + hws->funcs.set_blend_lut(pipe_ctx, plane_state); if (plane_state->in_transfer_func) tf = plane_state->in_transfer_func; @@ -1292,6 +1296,7 @@ static void dcn20_update_dchubp_dpp( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -1356,7 +1361,7 @@ static void dcn20_update_dchubp_dpp( old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; } } - dc->hwss.update_mpcc(dc, pipe_ctx); + hws->funcs.update_mpcc(dc, pipe_ctx); } if (pipe_ctx->update_flags.bits.scaler || @@ -1431,7 +1436,7 @@ static void dcn20_update_dchubp_dpp( } if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) - dc->hwss.update_plane_addr(dc, pipe_ctx); + hws->funcs.update_plane_addr(dc, pipe_ctx); if (pipe_ctx->update_flags.bits.enable) hubp->funcs->set_blank(hubp, false); @@ -1443,10 +1448,11 @@ static void dcn20_program_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; /* Only need to unblank on top pipe */ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) - dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); + hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); if (pipe_ctx->update_flags.bits.global_sync) { pipe_ctx->stream_res.tg->funcs->program_global_sync( @@ -1459,12 +1465,12 @@ static void dcn20_program_pipe( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); } if (pipe_ctx->update_flags.bits.odm) - dc->hwss.update_odm(dc, context, pipe_ctx); + hws->funcs.update_odm(dc, context, pipe_ctx); if (pipe_ctx->update_flags.bits.enable) dcn20_enable_plane(dc, pipe_ctx, context); @@ -1474,19 +1480,19 @@ static void dcn20_program_pipe( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->plane_state->update_flags.bits.hdr_mult) - dc->hwss.set_hdr_multiplier(pipe_ctx); + hws->funcs.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->update_flags.bits.enable || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - 
dc->hwss.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish * only do gamma programming for powering on, internal memcmp to avoid * updating on slave planes */ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) - dc->hwss.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); /* If the pipe has been enabled or has a different opp, we * should reprogram the fmt. This deals with cases where @@ -1526,6 +1532,7 @@ void dcn20_program_front_end_for_ctx( { const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; int i; + struct dce_hwseq *hws = dc->hwseq; bool pipe_locked[MAX_PIPES] = {false}; DC_LOGGER_INIT(dc->ctx->logger); @@ -1557,13 +1564,13 @@ void dcn20_program_front_end_for_ctx( && !context->res_ctx.pipe_ctx[i].top_pipe && !context->res_ctx.pipe_ctx[i].prev_odm_pipe && context->res_ctx.pipe_ctx[i].stream) - dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); + hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); /* Disconnect mpcc */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { - dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } @@ -1583,8 +1590,8 @@ void dcn20_program_front_end_for_ctx( pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) - && dc->hwss.program_all_writeback_pipes_in_tree) - dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); + && hws->funcs.program_all_writeback_pipes_in_tree) + hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); } } @@ -1669,6 +1676,7 @@ bool dcn20_update_bandwidth( struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) @@ -1698,10 +1706,10 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); if (pipe_ctx->prev_odm_pipe == NULL) - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(dc, pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); } pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1938,6 +1946,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; struct pipe_ctx *odm_pipe; params.opp_cnt = 1; @@ -1958,7 +1967,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } @@ -2046,6 +2055,7 @@ void dcn20_reset_hw_ctx_wrap( struct dc_state *context) { int i; + struct dce_hwseq *hws = 
dc->hwseq; /* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -2064,8 +2074,8 @@ void dcn20_reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); - if (dc->hwss.enable_stream_gating) - dc->hwss.enable_stream_gating(dc, pipe_ctx); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -2096,6 +2106,7 @@ void dcn20_get_mpctree_visual_confirm_color( void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = { {0} }; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; @@ -2106,10 +2117,10 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) // input to MPCC is always RGB, by default leave black_color at 0 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dc->hwss.get_hdr_visual_confirm_color( + hws->funcs.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dc->hwss.get_surface_visual_confirm_color( + hws->funcs.get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { dcn20_get_mpctree_visual_confirm_color( @@ -2265,13 +2276,13 @@ void dcn20_fpga_init_hw(struct dc *dc) res_pool->dccg->funcs->dccg_init(res_pool->dccg); //Enable ability to power gate / don't force power on permanently - dc->hwss.enable_power_gating_plane(hws, true); + hws->funcs.enable_power_gating_plane(hws, true); // Specific to FPGA dccg and registers REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - dc->hwss.dccg_init(hws); + hws->funcs.dccg_init(hws); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); @@ -2335,7 +2346,7 @@ void dcn20_fpga_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; /*to do*/ - dc->hwss.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 28aaceed6d8b..eecd7a26ec4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -26,6 +26,8 @@ #ifndef __DC_HWSS_DCN20_H__ #define __DC_HWSS_DCN20_H__ +#include "hw_sequencer_private.h" + bool dcn20_set_blend_lut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); bool dcn20_set_shaper_3dlut( @@ -111,6 +113,7 @@ void dcn20_disable_writeback( void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx); void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx); +void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx); void dcn20_init_vm_ctx( struct dce_hwseq *hws, struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 51b6c25aa3c5..d51e02fdab4d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c 
@@ -30,19 +30,13 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, - .init_pipes = dcn10_init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_dchub = dcn10_update_dchub, - .update_mpcc = dcn20_update_mpcc, .update_pending_status = dcn10_update_pending_status, - .set_input_transfer_func = dcn20_set_input_transfer_func, - .set_output_transfer_func = dcn20_set_output_transfer_func, .program_output_csc = dcn20_program_output_csc, - .power_down = dce110_power_down, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dcn10_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, @@ -54,16 +48,12 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_power_gating = dcn10_dummy_display_power_gating, .disable_plane = dcn20_disable_plane, - .blank_pixel_data = dcn20_blank_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .pipe_control_lock_global = dcn20_pipe_control_lock_global, .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, .set_static_screen_control = dcn10_set_static_screen_control, @@ -73,18 +63,42 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn20_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = 
dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn20_init_blank, .disable_vga = dcn20_disable_vga, @@ -95,15 +109,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .dpp_pg_control = dcn20_dpp_pg_control, .hubp_pg_control = dcn20_hubp_pg_control, .dsc_pg_control = NULL, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn20_enable_writeback, - .disable_writeback = dcn20_disable_writeback, .update_odm = dcn20_update_odm, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn20_program_dmdata_engine, - .init_sys_ctx = dcn20_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = dcn20_set_flip_control_gsl, .dsc_pg_control = dcn20_dsc_pg_control, .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, @@ -113,15 +119,15 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn20_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, }; void dcn20_hw_sequencer_construct(struct dc *dc) { dc->hwss = dcn20_funcs; + dc->hwseq->funcs = dcn20_private_funcs; if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwss.init_pipes = NULL; + dc->hwseq->funcs.init_pipes = NULL; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 005894dcabc9..081ad8e43d58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -28,6 +28,7 @@ #include "core_types.h" #include "resource.h" #include "dce/dce_hwseq.h" +#include "dcn21_hwseq.h" #include "vmid.h" #include "reg_helper.h" #include "hw/clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h index 2f7b8a220eb9..182736096123 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -26,6 +26,8 @@ #ifndef __DC_HWSS_DCN21_H__ #define __DC_HWSS_DCN21_H__ +#include "hw_sequencer_private.h" + struct dc; int dcn21_init_sys_ctx(struct dce_hwseq *hws, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 1d8b67b4e252..4861aa5c59ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -31,19 +31,13 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, - .init_pipes = dcn10_init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_dchub = dcn10_update_dchub, - .update_mpcc = 
dcn20_update_mpcc, .update_pending_status = dcn10_update_pending_status, - .set_input_transfer_func = dcn20_set_input_transfer_func, - .set_output_transfer_func = dcn20_set_output_transfer_func, .program_output_csc = dcn20_program_output_csc, - .power_down = dce110_power_down, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dcn10_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, @@ -55,16 +49,12 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_power_gating = dcn10_dummy_display_power_gating, .disable_plane = dcn20_disable_plane, - .blank_pixel_data = dcn20_blank_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .pipe_control_lock_global = dcn20_pipe_control_lock_global, .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, .set_static_screen_control = dcn10_set_static_screen_control, @@ -74,18 +64,49 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn21_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, +}; + +static const struct hwseq_private_funcs dcn21_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + 
.blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn20_init_blank, .disable_vga = dcn20_disable_vga, @@ -96,36 +117,26 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .dpp_pg_control = dcn20_dpp_pg_control, .hubp_pg_control = dcn20_hubp_pg_control, .dsc_pg_control = NULL, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn20_enable_writeback, - .disable_writeback = dcn20_disable_writeback, .update_odm = dcn20_update_odm, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn20_program_dmdata_engine, - .init_sys_ctx = dcn21_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = dcn20_set_flip_control_gsl, .dsc_pg_control = dcn20_dsc_pg_control, .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, .set_hdr_multiplier = dcn10_set_hdr_multiplier, .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, .s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa, - .optimize_pwr_state = dcn21_optimize_pwr_state, - .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .wait_for_blank_complete = dcn20_wait_for_blank_complete, .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn20_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, }; void dcn21_hw_sequencer_construct(struct dc *dc) { dc->hwss = dcn21_funcs; + dc->hwseq->funcs = dcn21_private_funcs; if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwss.init_pipes = NULL; + dc->hwseq->funcs.init_pipes = NULL; } } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 5941577d78a5..e9c6021a5372 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,38 +32,11 @@ #include "inc/hw/link_encoder.h" #include "core_status.h" -enum pipe_gating_control { - PIPE_GATING_CONTROL_DISABLE = 0, - PIPE_GATING_CONTROL_ENABLE, - PIPE_GATING_CONTROL_INIT -}; - enum vline_select { VLINE0, VLINE1 }; -struct dce_hwseq_wa { - bool blnd_crtc_trigger; - bool DEGVIDCN10_253; - bool false_optc_underflow; - bool DEGVIDCN10_254; - bool DEGVIDCN21; -}; - -struct hwseq_wa_state { - bool DEGVIDCN10_253_applied; -}; - -struct dce_hwseq { - struct dc_context *ctx; - const struct dce_hwseq_registers *regs; - const struct dce_hwseq_shift *shifts; - const struct dce_hwseq_mask *masks; - struct dce_hwseq_wa wa; - struct hwseq_wa_state wa_state; -}; - struct pipe_ctx; struct dc_state; struct dc_stream_status; @@ -71,255 +44,110 @@ struct dc_writeback_info; struct dchub_init_data; struct dc_static_screen_events; struct resource_pool; -struct resource_context; -struct stream_resource; struct dc_phy_addr_space_config; struct dc_virtual_addr_space_config; -struct hubp; struct dpp; +struct dce_hwseq; struct hw_sequencer_funcs { + /* Embedded Display Related */ + void (*edp_power_control)(struct dc_link *link, bool enable); + void (*edp_wait_for_hpd_ready)(struct dc_link 
*link, bool power_up); - void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - + /* Pipe Programming Related */ void (*init_hw)(struct dc *dc); - - void (*init_pipes)(struct dc *dc, struct dc_state *context); - - enum dc_status (*apply_ctx_to_hw)( - struct dc *dc, struct dc_state *context); - - void (*reset_hw_ctx_wrap)( - struct dc *dc, struct dc_state *context); - - void (*apply_ctx_for_surface)( - struct dc *dc, + void (*enable_accelerated_mode)(struct dc *dc, + struct dc_state *context); + enum dc_status (*apply_ctx_to_hw)(struct dc *dc, + struct dc_state *context); + void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, - int num_planes, + int num_planes, struct dc_state *context); + void (*program_front_end_for_ctx)(struct dc *dc, struct dc_state *context); - - void (*program_gamut_remap)( + void (*update_plane_addr)(const struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*program_output_csc)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum dc_color_space colorspace, - uint16_t *matrix, - int opp_id); - - void (*program_front_end_for_ctx)( - struct dc *dc, - struct dc_state *context); - void (*program_triplebuffer)( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer); - void (*set_flip_control_gsl)( - struct pipe_ctx *pipe_ctx, - bool flip_immediate); - - void (*update_plane_addr)( - const struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*plane_atomic_disconnect)( - struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*update_dchub)( - struct dce_hwseq *hws, - struct dchub_init_data *dh_data); - - int (*init_sys_ctx)( - struct dce_hwseq *hws, - struct dc *dc, - struct dc_phy_addr_space_config *pa_config); - void (*init_vm_ctx)( - struct dce_hwseq *hws, - struct dc *dc, - struct dc_virtual_addr_space_config *va_config, - int vmid); - void (*update_mpcc)( - struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*update_pending_status)( + void (*update_dchub)(struct dce_hwseq *hws, + struct dchub_init_data *dh_data); + void (*wait_for_mpcc_disconnect)(struct dc *dc, + struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); - - bool (*set_input_transfer_func)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state); - - bool (*set_output_transfer_func)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream); - - void (*power_down)(struct dc *dc); - - void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context); - - void (*enable_timing_synchronization)( - struct dc *dc, - int group_index, - int group_size, - struct pipe_ctx *grouped_pipes[]); - - void (*enable_per_frame_crtc_position_reset)( - struct dc *dc, - int group_size, + void (*program_triplebuffer)(const struct dc *dc, + struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); + void (*update_pending_status)(struct pipe_ctx *pipe_ctx); + + /* Pipe Lock Related */ + void (*pipe_control_lock_global)(struct dc *dc, + struct pipe_ctx *pipe, bool lock); + void (*pipe_control_lock)(struct dc *dc, + struct pipe_ctx *pipe, bool lock); + void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, + bool flip_immediate); + + /* Timing Related */ + void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, + struct crtc_position *position); + int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); + void 
(*enable_per_frame_crtc_position_reset)(struct dc *dc, + int group_size, struct pipe_ctx *grouped_pipes[]); + void (*enable_timing_synchronization)(struct dc *dc, + int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); + void (*setup_periodic_interrupt)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum vline_select vline); + void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, + unsigned int vmin, unsigned int vmax, + unsigned int vmid, unsigned int vmid_frame_number); + void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, + int num_pipes, + const struct dc_static_screen_events *events); - void (*enable_display_pipe_clock_gating)( - struct dc_context *ctx, - bool clock_gating); - - bool (*enable_display_power_gating)( - struct dc *dc, - uint8_t controller_id, - struct dc_bios *dcb, - enum pipe_gating_control power_gating); - - void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*update_info_frame)(struct pipe_ctx *pipe_ctx); - - void (*send_immediate_sdp_message)( - struct pipe_ctx *pipe_ctx, - const uint8_t *custom_sdp_message, - unsigned int sdp_message_size); - + /* Stream Related */ void (*enable_stream)(struct pipe_ctx *pipe_ctx); - void (*disable_stream)(struct pipe_ctx *pipe_ctx); - + void (*blank_stream)(struct pipe_ctx *pipe_ctx); void (*unblank_stream)(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); - void (*blank_stream)(struct pipe_ctx *pipe_ctx); - - void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); - - void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx); - - void (*pipe_control_lock)( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); + /* Bandwidth Related */ + void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context); + bool (*update_bandwidth)(struct dc *dc, struct dc_state *context); + void (*optimize_bandwidth)(struct dc *dc, struct dc_state *context); - void (*pipe_control_lock_global)( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); - void (*blank_pixel_data)( - struct dc *dc, + /* Infopacket Related */ + void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); + void (*send_immediate_sdp_message)( struct pipe_ctx *pipe_ctx, - bool blank); - - void (*prepare_bandwidth)( - struct dc *dc, - struct dc_state *context); - void (*optimize_bandwidth)( - struct dc *dc, - struct dc_state *context); - - void (*exit_optimized_pwr_state)( - const struct dc *dc, - struct dc_state *context); - void (*optimize_pwr_state)( - const struct dc *dc, - struct dc_state *context); - - bool (*update_bandwidth)( - struct dc *dc, - struct dc_state *context); + const uint8_t *custom_sdp_message, + unsigned int sdp_message_size); + void (*update_info_frame)(struct pipe_ctx *pipe_ctx); + void (*set_dmdata_attributes)(struct pipe_ctx *pipe); void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx); bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx); - void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, - unsigned int vmin, unsigned int vmax, - unsigned int vmid, unsigned int vmid_frame_number); - - void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, - struct crtc_position *position); - - void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events); - - enum dc_status (*enable_stream_timing)( - struct pipe_ctx *pipe_ctx, - struct dc_state *context, - struct dc *dc); - - void (*setup_stereo)( - struct pipe_ctx *pipe_ctx, - struct dc *dc); - - void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); - - void 
(*log_hw_state)(struct dc *dc, - struct dc_log_buffer_ctx *log_ctx); - void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); - void (*clear_status_bits)(struct dc *dc, unsigned int mask); - - void (*wait_for_mpcc_disconnect)(struct dc *dc, - struct resource_pool *res_pool, - struct pipe_ctx *pipe_ctx); - - void (*edp_power_control)( - struct dc_link *link, - bool enable); - void (*edp_backlight_control)( - struct dc_link *link, - bool enable); - void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); - + /* Cursor Related */ void (*set_cursor_position)(struct pipe_ctx *pipe); void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); - void (*setup_periodic_interrupt)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum vline_select vline); - void (*setup_vupdate_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx); - bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*init_blank)(struct dc *dc, struct timing_generator *tg); - void (*disable_vga)(struct dce_hwseq *hws); - void (*bios_golden_init)(struct dc *dc); - void (*plane_atomic_power_down)(struct dc *dc, - struct dpp *dpp, - struct hubp *hubp); - - void (*plane_atomic_disable)( - struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*enable_power_gating_plane)( - struct dce_hwseq *hws, - bool enable); - - void (*dpp_pg_control)( - struct dce_hwseq *hws, - unsigned int dpp_inst, - bool power_on); - - void (*hubp_pg_control)( - struct dce_hwseq *hws, - unsigned int hubp_inst, - bool power_on); - - void (*dsc_pg_control)( - struct dce_hwseq *hws, - unsigned int dsc_inst, - bool power_on); - + /* Colour Related */ + void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx); + void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, int opp_id); - void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); - void (*program_all_writeback_pipes_in_tree)( + /* VM Related */ + int (*init_sys_ctx)(struct dce_hwseq *hws, struct dc *dc, - const struct dc_stream_state *stream, - struct dc_state *context); + struct dc_phy_addr_space_config *pa_config); + void (*init_vm_ctx)(struct dce_hwseq *hws, + struct dc *dc, + struct dc_virtual_addr_space_config *va_config, + int vmid); + + /* Writeback Related */ void (*update_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, @@ -330,46 +158,32 @@ struct hw_sequencer_funcs { struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); - enum dc_status (*set_clock)(struct dc *dc, - enum dc_clock_type clock_type, - uint32_t clk_khz, - uint32_t stepping); - void (*get_clock)(struct dc *dc, + /* Clock Related */ + enum dc_status (*set_clock)(struct dc *dc, enum dc_clock_type clock_type, + uint32_t clk_khz, uint32_t stepping); + void (*get_clock)(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); - - bool (*s0i3_golden_init_wa)(struct dc *dc); - - void (*get_surface_visual_confirm_color)( - const struct pipe_ctx *pipe_ctx, - struct tg_color *color); - - void (*get_hdr_visual_confirm_color)( - struct pipe_ctx *pipe_ctx, - struct tg_color *color); - - void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx); - - void (*verify_allow_pstate_change_high)(struct dc *dc); - - void (*program_pipe)( - struct dc *dc, - struct pipe_ctx *pipe_ctx, + void (*optimize_pwr_state)(const struct 
dc *dc, + struct dc_state *context); + void (*exit_optimized_pwr_state)(const struct dc *dc, struct dc_state *context); - bool (*wait_for_blank_complete)( - struct output_pixel_processor *opp); + /* Audio Related */ + void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); + void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx); - void (*dccg_init)(struct dce_hwseq *hws); + /* Stereo 3D Related */ + void (*setup_stereo)(struct pipe_ctx *pipe_ctx, struct dc *dc); - bool (*set_blend_lut)( - struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); + /* HW State Logging Related */ + void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); + void (*get_hw_state)(struct dc *dc, char *pBuf, + unsigned int bufSize, unsigned int mask); + void (*clear_status_bits)(struct dc *dc, unsigned int mask); - bool (*set_shaper_3dlut)( - struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); - int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h new file mode 100644 index 000000000000..8ba06f015975 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -0,0 +1,156 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HW_SEQUENCER_PRIVATE_H__ +#define __DC_HW_SEQUENCER_PRIVATE_H__ + +#include "dc_types.h" + +enum pipe_gating_control { + PIPE_GATING_CONTROL_DISABLE = 0, + PIPE_GATING_CONTROL_ENABLE, + PIPE_GATING_CONTROL_INIT +}; + +struct dce_hwseq_wa { + bool blnd_crtc_trigger; + bool DEGVIDCN10_253; + bool false_optc_underflow; + bool DEGVIDCN10_254; + bool DEGVIDCN21; +}; + +struct hwseq_wa_state { + bool DEGVIDCN10_253_applied; +}; + +struct pipe_ctx; +struct dc_state; +struct dc_stream_status; +struct dc_writeback_info; +struct dchub_init_data; +struct dc_static_screen_events; +struct resource_pool; +struct resource_context; +struct stream_resource; +struct dc_phy_addr_space_config; +struct dc_virtual_addr_space_config; +struct hubp; +struct dpp; +struct dce_hwseq; +struct timing_generator; +struct tg_color; +struct output_pixel_processor; + +struct hwseq_private_funcs { + + void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*init_pipes)(struct dc *dc, struct dc_state *context); + void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context); + void (*update_plane_addr)(const struct dc *dc, + struct pipe_ctx *pipe_ctx); + void (*plane_atomic_disconnect)(struct dc *dc, + struct pipe_ctx *pipe_ctx); + void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); + bool (*set_input_transfer_func)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); + bool (*set_output_transfer_func)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); + void (*power_down)(struct dc *dc); + void (*enable_display_pipe_clock_gating)(struct dc_context *ctx, + bool clock_gating); + bool (*enable_display_power_gating)(struct dc *dc, + uint8_t controller_id, + struct dc_bios *dcb, + enum pipe_gating_control power_gating); + void (*blank_pixel_data)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); + enum dc_status (*enable_stream_timing)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); + void (*edp_backlight_control)(struct dc_link *link, + bool enable); + void (*setup_vupdate_interrupt)(struct dc *dc, + struct pipe_ctx *pipe_ctx); + bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*init_blank)(struct dc *dc, struct timing_generator *tg); + void (*disable_vga)(struct dce_hwseq *hws); + void (*bios_golden_init)(struct dc *dc); + void (*plane_atomic_power_down)(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp); + void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*enable_power_gating_plane)(struct dce_hwseq *hws, + bool enable); + void (*dpp_pg_control)(struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); + void (*hubp_pg_control)(struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); + void (*dsc_pg_control)(struct dce_hwseq *hws, + unsigned int dsc_inst, + bool power_on); + void (*update_odm)(struct dc *dc, struct dc_state *context, + struct pipe_ctx *pipe_ctx); + void (*program_all_writeback_pipes_in_tree)(struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context); + bool (*s0i3_golden_init_wa)(struct dc *dc); + void (*get_surface_visual_confirm_color)( + const struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void (*get_hdr_visual_confirm_color)(struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void (*set_hdr_multiplier)(struct pipe_ctx 
*pipe_ctx); + void (*verify_allow_pstate_change_high)(struct dc *dc); + void (*program_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); + bool (*wait_for_blank_complete)(struct output_pixel_processor *opp); + void (*dccg_init)(struct dce_hwseq *hws); + bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); + bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +}; + +struct dce_hwseq { + struct dc_context *ctx; + const struct dce_hwseq_registers *regs; + const struct dce_hwseq_shift *shifts; + const struct dce_hwseq_mask *masks; + struct dce_hwseq_wa wa; + struct hwseq_wa_state wa_state; + struct hwseq_private_funcs funcs; + +}; + +#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */ -- cgit From 10f51857b753af25cc048d8c5791433ee94f9164 Mon Sep 17 00:00:00 2001 From: Michael Strauss Date: Mon, 4 Nov 2019 13:39:20 -0500 Subject: drm/amd/display: Fix Dali clk mgr construct [WHY] Dali is currently being misinterpreted as Renoir, as a result uses wrong clk mgr constructor [HOW] Add check to init Dali as Raven2 before it can be misidentified Clean up & fix Raven2 & Dali ASIC checks Signed-off-by: Michael Strauss Reviewed-by: Eric Yang Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 7 +++++++ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 12 +++++------- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index a7c4c1d1fc59..6d60ef822619 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -134,6 +134,13 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p #if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: + if (ASICREV_IS_DALI(asic_id.hw_internal_rev)) { + /* TEMP: this check has to come before ASICREV_IS_RENOIR */ + /* which also incorrectly returns true for Dali */ + rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); + break; + } + if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 6f56208a9471..72b659c63aea 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -134,19 +134,17 @@ #define PICASSO_A0 0x41 /* DCN1_01 */ #define RAVEN2_A0 0x81 +#define RAVEN2_15D8_REV_E3 0xE3 +#define RAVEN2_15D8_REV_E4 0xE4 #define RAVEN1_F0 0xF0 #define RAVEN_UNKNOWN 0xFF -#define PICASSO_15D8_REV_E3 0xE3 -#define PICASSO_15D8_REV_E4 0xE4 - #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) -#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < PICASSO_15D8_REV_E3)) -#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && (eChipRev < RAVEN1_F0)) - +#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0)) #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) - +#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \ + || (eChipRev == RAVEN2_15D8_REV_E4)) #define FAMILY_RV 142 /* DCN 1*/ -- cgit From 
eab4bb97c8e7af219e12afc53f0def71a54d014b Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Thu, 7 Nov 2019 13:06:48 -0500 Subject: drm/amd/display: Map DSC resources 1-to-1 if numbers of OPPs and DSCs are equal [why] On ASICs where number of DSCs is the same as OPPs there's no need for DSC resource management. Mappping 1-to-1 fixes mode-set- or S3- -related issues for such platforms. [how] Map DSC resources 1-to-1 to pipes only if number of OPPs is the same as number of DSCs. This will still keep other ASICs working. A follow-up patch to fix mode-set issues on those ASICs will be required if testing shows issues with mode set. Signed-off-by: Nikola Cornij Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 2a158ff5f0a5..322556c587b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1524,13 +1524,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, - struct display_stream_compressor **dsc) + struct display_stream_compressor **dsc, + int pipe_idx) { int i; ASSERT(*dsc == NULL); *dsc = NULL; + if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { + *dsc = pool->dscs[pipe_idx]; + res_ctx->is_dsc_acquired[pipe_idx] = true; + return; + } + /* Find first free DSC */ for (i = 0; i < pool->res_cap->num_dsc; i++) if (!res_ctx->is_dsc_acquired[i]) { @@ -1571,7 +1578,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, if (pipe_ctx->stream != dc_stream) continue; - acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); + acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { @@ -1763,7 +1770,7 @@ bool dcn20_split_stream_for_odm( } next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; if (next_odm_pipe->stream->timing.flags.DSC == 1) { - acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); + acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; -- cgit From 580c8be278b09af65296aaa1a2227d3003eb7892 Mon Sep 17 00:00:00 2001 From: Joseph Gravenor Date: Mon, 4 Nov 2019 16:39:35 -0500 Subject: drm/amd/display: fix DalDramClockChangeLatencyNs override [why] pstate_latency_us never gets updated from the hard coded value in rn_clk_mgr.c [how] update the wm table's values before we do calculations with them Signed-off-by: Joseph Gravenor Reviewed-by: Eric Yang Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 818c7a629484..fef11d57d2b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1011,9 +1011,12 @@ static void patch_bounding_box(struct dc *dc, struct 
_vcs_dpi_soc_bounding_box_s } if (dc->bb_overrides.dram_clock_change_latency_ns) { - bb->dram_clock_change_latency_us = + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us = dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } } + kernel_fpu_end(); } -- cgit From a4cea11655fbc5246fb5a066588e1589e9cfefa5 Mon Sep 17 00:00:00 2001 From: Jaehyun Chung Date: Thu, 7 Nov 2019 11:16:49 -0500 Subject: drm/amd/display: Wrong ifdef guards were used around DML validation [Why] Wrong guards were causing the debug option not to run. [How] Changed the guard to the correct one, matching the rq, ttu, dlg regs struct members that need to be guarded. Also log a message when validation starts. Signed-off-by: Jaehyun Chung Reviewed-by: Alvin Lee Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 1 + drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e384c143bb58..061e8adf7476 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2187,7 +2187,7 @@ static void commit_planes_for_stream(struct dc *dc, } if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { dc->hwss.program_front_end_for_ctx(dc, context); -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN if (dc->debug.validate_dml_output) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 2823be75b071..84d7ac5dd206 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1257,6 +1257,7 @@ void hubp2_validate_dml_output(struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; DC_LOGGER_INIT(ctx->logger); + DC_LOG_DEBUG("DML Validation | Running Validation"); /* Requestor Regs */ REG_GET(HUBPRET_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 0be1c917b242..4408aed5087b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -267,6 +267,7 @@ void hubp21_validate_dml_output(struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; DC_LOGGER_INIT(ctx->logger); + DC_LOG_DEBUG("DML Validation | Running Validation"); /* Requester - Per hubp */ REG_GET(HUBPRET_CONTROL, -- cgit From 832aa63bef346fc6a58bf46412036d368142fddf Mon Sep 17 00:00:00 2001 From: Paul Hsieh Date: Fri, 1 Nov 2019 14:41:37 +0800 Subject: drm/amd/display: Reset PHY in link re-training [Why] Link training failed randomly when plugging USB-C display in/out. [How] If link training failed, reset PHY in link re-training. 
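[Editor's note, not part of the patch that follows: a minimal, compilable sketch of the retry flow the [How] above describes. All names here (phy_enable, phy_disable, train_once, train_with_retries, ATTEMPTS) are stand-ins invented for illustration; in the real patch they correspond roughly to dp_enable_link_phy(), dp_disable_link_phy(), dc_link_dp_perform_link_training() and perform_link_training_with_retries(), and the growing msleep() between attempts is omitted.]

/* Illustrative sketch only: the PHY is brought up before every attempt,
 * reset between failed attempts, and left powered after the final failure. */
#include <stdbool.h>
#include <stdio.h>

#define ATTEMPTS 4

static int fail_first_n = 2;                  /* pretend the first two attempts fail */

static void phy_enable(void)  { puts("PHY on"); }   /* stands in for dp_enable_link_phy() */
static void phy_disable(void) { puts("PHY off"); }  /* stands in for dp_disable_link_phy() */
static bool train_once(void)  { return fail_first_n-- <= 0; } /* stands in for link training */

static bool train_with_retries(void)
{
	int j;

	for (j = 0; j < ATTEMPTS; ++j) {
		phy_enable();                 /* enable PHY before each attempt */
		if (train_once())
			return true;
		if (j == ATTEMPTS - 1)
			break;                /* last attempt failed: keep PHY on, no reset */
		phy_disable();                /* otherwise reset PHY and retry */
	}
	return false;
}

int main(void)
{
	printf("trained: %d\n", train_with_retries());
	return 0;
}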
Signed-off-by: Paul Hsieh Reviewed-by: Wenjing Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 32 ++-------- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 68 ++++++++++++++++++---- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 14 +---- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5 +- 4 files changed, 66 insertions(+), 53 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 093f6c808876..5a35395e6060 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1495,7 +1495,6 @@ static enum dc_status enable_link_dp( bool skip_video_pattern; struct dc_link *link = stream->link; struct dc_link_settings link_settings = {0}; - enum dp_panel_mode panel_mode; bool fec_enable; int i; bool apply_seamless_boot_optimization = false; @@ -1531,40 +1530,17 @@ static enum dc_status enable_link_dp( if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); - dp_enable_link_phy( - link, - pipe_ctx->stream->signal, - pipe_ctx->clock_source->id, - &link_settings); - - if (stream->sink_patches.dppowerup_delay > 0) { - int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; - - msleep(delay_dp_power_up_in_ms); - } - - panel_mode = dp_get_panel_mode(link); - dp_set_panel_mode(link, panel_mode); - - /* We need to do this before the link training to ensure the idle pattern in SST - * mode will be sent right after the link training */ - link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, - pipe_ctx->stream_res.stream_enc->id, true); skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) skip_video_pattern = false; - if (link->aux_access_disabled) { - dc_link_dp_perform_link_training_skip_aux(link, &link_settings); - - link->cur_link_settings = link_settings; - status = DC_OK; - } else if (perform_link_training_with_retries( - link, + if (perform_link_training_with_retries( &link_settings, skip_video_pattern, - LINK_TRAINING_ATTEMPTS)) { + LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal)) { link->cur_link_settings = link_settings; status = DC_OK; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 272261192e82..537b4dee8f22 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1433,23 +1433,58 @@ enum link_training_result dc_link_dp_perform_link_training( } bool perform_link_training_with_retries( - struct dc_link *link, const struct dc_link_settings *link_setting, bool skip_video_pattern, - int attempts) + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal) { uint8_t j; uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + enum dp_panel_mode panel_mode = dp_get_panel_mode(link); for (j = 0; j < attempts; ++j) { - if (dc_link_dp_perform_link_training( + dp_enable_link_phy( + link, + signal, + pipe_ctx->clock_source->id, + link_setting); + + if (stream->sink_patches.dppowerup_delay > 0) { + int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; + + msleep(delay_dp_power_up_in_ms); + } + + dp_set_panel_mode(link, panel_mode); + + /* We need to do this before the link training to ensure the idle pattern in SST + 
* mode will be sent right after the link training + */ + link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, + pipe_ctx->stream_res.stream_enc->id, true); + + if (link->aux_access_disabled) { + dc_link_dp_perform_link_training_skip_aux(link, link_setting); + return true; + } else if (dc_link_dp_perform_link_training( link, link_setting, skip_video_pattern) == LINK_TRAINING_SUCCESS) return true; + /* latest link training still fail, skip delay and keep PHY on + */ + if (j == (attempts - 1)) + break; + + dp_disable_link_phy(link, signal); + msleep(delay_between_attempts); + delay_between_attempts += LINK_TRAINING_RETRY_DELAY; } @@ -2770,17 +2805,26 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd sizeof(hpd_irq_dpcd_data), "Status: "); - perform_link_training_with_retries(link, - &link->cur_link_settings, - true, LINK_TRAINING_ATTEMPTS); - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && - pipe_ctx->stream->dpms_off == false && - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_link_allocate_mst_payload(pipe_ctx); - } + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + break; + } + + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return false; + + dp_disable_link_phy(link, pipe_ctx->stream->signal); + + perform_link_training_with_retries(&link->cur_link_settings, + true, LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal); + + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && + pipe_ctx->stream->dpms_off == false && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dc_link_allocate_mst_payload(pipe_ctx); } status = false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 67ce12df23f1..548aac02ca11 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -333,20 +333,12 @@ void dp_retrain_link_dp_test(struct dc_link *link, memset(&link->cur_link_settings, 0, sizeof(link->cur_link_settings)); - link->link_enc->funcs->enable_dp_output( - link->link_enc, - link_setting, - pipes[i].clock_source->id); - link->cur_link_settings = *link_setting; - - dp_receiver_power_ctrl(link, true); - perform_link_training_with_retries( - link, link_setting, skip_video_pattern, - LINK_TRAINING_ATTEMPTS); - + LINK_TRAINING_ATTEMPTS, + &pipes[i], + SIGNAL_TYPE_DISPLAY_PORT); link->dc->hwss.enable_stream(&pipes[i]); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 4879cf54d8f1..6198bccd6199 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -57,10 +57,11 @@ void decide_link_settings( struct dc_link_settings *link_setting); bool perform_link_training_with_retries( - struct dc_link *link, const struct dc_link_settings *link_setting, bool skip_video_pattern, - int attempts); + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal); bool is_mst_supported(struct dc_link *link); -- cgit From 61f14c5b528f0c828e0d1f27f8580a7f43ad1a5b Mon Sep 17 00:00:00 2001 From: Lucy Li Date: Fri, 25 Oct 2019 17:59:32 -0400 Subject: drm/amd/display: Disable link before reenable [Why] Black screen seen after display is disabled then re-enabled. 
Caused by difference in link settings when switching between different resolutions. [How] In PnP case, or whenever the display is still enabled but the driver is unloaded, disable link before re-enabling with new link settings. Signed-off-by: Lucy Li Reviewed-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 99 ++++++++++++++------------- 1 file changed, 52 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 5a35395e6060..4681ca20f683 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1511,15 +1511,6 @@ static enum dc_status enable_link_dp( decide_link_settings(stream, &link_settings); if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { - /* If link settings are different than current and link already enabled - * then need to disable before programming to new rate. - */ - if (link->link_status.link_active && - (link->cur_link_settings.lane_count != link_settings.lane_count || - link->cur_link_settings.link_rate != link_settings.link_rate)) { - dp_disable_link_phy(link, pipe_ctx->stream->signal); - } - /*in case it is not on*/ link->dc->hwss.edp_power_control(link, true); link->dc->hwss.edp_wait_for_hpd_ready(link, true); @@ -2039,6 +2030,45 @@ static void write_i2c_redriver_setting( ASSERT(i2c_success); } +static void disable_link(struct dc_link *link, enum signal_type signal) +{ + /* + * TODO: implement call for dp_set_hw_test_pattern + * it is needed for compliance testing + */ + + /* Here we need to specify that encoder output settings + * need to be calculated as for the set mode, + * it will lead to querying dynamic link capabilities + * which should be done before enable output + */ + + if (dc_is_dp_signal(signal)) { + /* SST DP, eDP */ + if (dc_is_dp_sst_signal(signal)) + dp_disable_link_phy(link, signal); + else + dp_disable_link_phy_mst(link, signal); + + if (dc_is_dp_sst_signal(signal) || + link->mst_stream_alloc_table.stream_count == 0) { + dp_set_fec_enable(link, false); + dp_set_fec_ready(link, false); + } + } else { + if (signal != SIGNAL_TYPE_VIRTUAL) + link->link_enc->funcs->disable_output(link->link_enc, signal); + } + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count <= 0) + link->link_status.link_active = false; + } else { + link->link_status.link_active = false; + } +} + static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -2123,6 +2153,19 @@ static enum dc_status enable_link( struct pipe_ctx *pipe_ctx) { enum dc_status status = DC_ERROR_UNEXPECTED; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + + /* There's some scenarios where driver is unloaded with display + * still enabled. When driver is reloaded, it may cause a display + * to not light up if there is a mismatch between old and new + * link settings. Need to call disable first before enabling at + * new link settings. 
+ */ + if (link->link_status.link_active) { + disable_link(link, pipe_ctx->stream->signal); + } + switch (pipe_ctx->stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT: status = enable_link_dp(state, pipe_ctx); @@ -2157,44 +2200,6 @@ static enum dc_status enable_link( return status; } -static void disable_link(struct dc_link *link, enum signal_type signal) -{ - /* - * TODO: implement call for dp_set_hw_test_pattern - * it is needed for compliance testing - */ - - /* here we need to specify that encoder output settings - * need to be calculated as for the set mode, - * it will lead to querying dynamic link capabilities - * which should be done before enable output */ - - if (dc_is_dp_signal(signal)) { - /* SST DP, eDP */ - if (dc_is_dp_sst_signal(signal)) - dp_disable_link_phy(link, signal); - else - dp_disable_link_phy_mst(link, signal); - - if (dc_is_dp_sst_signal(signal) || - link->mst_stream_alloc_table.stream_count == 0) { - dp_set_fec_enable(link, false); - dp_set_fec_ready(link, false); - } - } else { - if (signal != SIGNAL_TYPE_VIRTUAL) - link->link_enc->funcs->disable_output(link->link_enc, signal); - } - - if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - /* MST disable link only when no stream use the link */ - if (link->mst_stream_alloc_table.stream_count <= 0) - link->link_status.link_active = false; - } else { - link->link_status.link_active = false; - } -} - static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing) { -- cgit From 20ad55c89d39de5436eb68633d317196f2ca15ae Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Thu, 7 Nov 2019 14:41:06 -0500 Subject: drm/amd/display: Add DMCUB__PG_DONE trace code enum Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h index b0ee099d8a6e..6b3ee42db350 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h @@ -45,6 +45,7 @@ enum dmucb_trace_code { DMCUB__DMCU_ISR_LOAD_END, DMCUB__MAIN_IDLE, DMCUB__PERF_TRACE, + DMCUB__PG_DONE, }; struct dmcub_trace_buf_entry { -- cgit From 3c465370f20abd3c8783da816a445ad5bcbfdb76 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 7 Nov 2019 15:26:14 -0500 Subject: drm/amd/display: Only wait for DMUB phy init on dcn21 [Why] The wait for PHY init won't finish if the firmware doesn't support it. [How] Only hook this functionality up on DCN21 and move it out of DCN20. For ASIC without support then this should return OK so we don't hang while waiting in DC. 
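[Editor's note, not part of the patch that follows: a small, compilable sketch of the optional-hook pattern this change (and the DMUB_STATUS_OK change further below) relies on. hw_funcs, wait_for_phy_init, phy_ready and the status enum are simplified stand-ins for the DMUB service's hw_funcs table, dmub_srv_wait_for_phy_init() and the DMUB_STATUS_* codes; the real polling loop also sleeps between reads.]

/* Illustrative sketch only: a hook that is not installed means "nothing to
 * wait for", not an error, so ASICs without support return OK immediately. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum status { STATUS_OK, STATUS_TIMEOUT, STATUS_INVALID };

struct hw_funcs {
	bool (*is_phy_init)(void);    /* installed only where supported, e.g. DCN21 */
};

static enum status wait_for_phy_init(const struct hw_funcs *funcs, int timeout_us)
{
	int i;

	if (!funcs)
		return STATUS_INVALID;
	if (!funcs->is_phy_init)
		return STATUS_OK;         /* unsupported: succeed without waiting */

	for (i = 0; i <= timeout_us; i += 10) {
		if (funcs->is_phy_init())
			return STATUS_OK;
		/* the real loop would sleep ~10us here between polls */
	}
	return STATUS_TIMEOUT;
}

static bool phy_ready(void) { return true; }

int main(void)
{
	struct hw_funcs dcn20 = { .is_phy_init = NULL };       /* no hook installed */
	struct hw_funcs dcn21 = { .is_phy_init = phy_ready };  /* hook installed */

	printf("%d %d\n", wait_for_phy_init(&dcn20, 100), wait_for_phy_init(&dcn21, 100));
	return 0;
}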
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 5 ----- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 2 -- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 5 +++++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h | 2 ++ drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index e2b2cf2e01fd..6b7d54572aa3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -135,8 +135,3 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub) return supported; } - -bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub) -{ - return REG_READ(DMCUB_SCRATCH10) == 0; -} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index e1ba748ca594..ca7db03b94f7 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -59,6 +59,4 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub); bool dmub_dcn20_is_supported(struct dmub_srv *dmub); -bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub); - #endif /* _DMUB_DCN20_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c index d40a808112e7..b9dc2dd645eb 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -124,3 +124,8 @@ bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub) { return (REG_READ(DMCUB_SCRATCH0) == 3); } + +bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_SCRATCH10) == 0; +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h index f57969d8d56f..9e5f195e288f 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -42,4 +42,6 @@ void dmub_dcn21_setup_windows(struct dmub_srv *dmub, bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub); +bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub); + #endif /* _DMUB_DCN21_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 60c574a39c6a..3ec26f6af2e1 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -76,13 +76,13 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; funcs->is_supported = dmub_dcn20_is_supported; - funcs->is_phy_init = dmub_dcn20_is_phy_init; funcs->is_hw_init = dmub_dcn20_is_hw_init; if (asic == DMUB_ASIC_DCN21) { funcs->backdoor_load = dmub_dcn21_backdoor_load; funcs->setup_windows = dmub_dcn21_setup_windows; funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done; + funcs->is_phy_init = dmub_dcn21_is_phy_init; } break; -- cgit From a49421185189ba05f4c1b8477f1272fb2d024d2f Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 7 Nov 2019 15:29:20 -0500 Subject: drm/amd/display: Return DMUB_STATUS_OK when autoload unsupported [Why] Not having support for autoload isn't an error. 
If the DMUB firmware doesn't support it then don't return DMUB_STATUS_INVALID. [How] Return DMUB_STATUS_OK when ->is_auto_load_done is NULL. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 3ec26f6af2e1..70c7a4be9ccc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -379,9 +379,12 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, { uint32_t i; - if (!dmub->hw_init || !dmub->hw_funcs.is_auto_load_done) + if (!dmub->hw_init) return DMUB_STATUS_INVALID; + if (!dmub->hw_funcs.is_auto_load_done) + return DMUB_STATUS_OK; + for (i = 0; i <= timeout_us; i += 100) { if (dmub->hw_funcs.is_auto_load_done(dmub)) return DMUB_STATUS_OK; @@ -397,9 +400,12 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, { uint32_t i = 0; - if (!dmub->hw_init || !dmub->hw_funcs.is_phy_init) + if (!dmub->hw_init) return DMUB_STATUS_INVALID; + if (!dmub->hw_funcs.is_phy_init) + return DMUB_STATUS_OK; + for (i = 0; i <= timeout_us; i += 10) { if (dmub->hw_funcs.is_phy_init(dmub)) return DMUB_STATUS_OK; -- cgit From c1825b5777ce25b0f84be8775a588d2bfa1e6420 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 7 Nov 2019 15:47:46 -0500 Subject: drm/amd/display: Program CW5 for tracebuffer for dcn20 [Why] On dcn21 this is programmed for tracebuffer support but isn't being programmed on dcn20. DMCUB execution hits an undefined address 65000000 on tracebuffer access. [How] Program CW5. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 6b7d54572aa3..302dd3d4b77d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -99,6 +99,13 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, 1); + + REG_WRITE(DMCUB_REGION3_CW5_OFFSET, cw5->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, cw5->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); + REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, + DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, + DMCUB_REGION3_CW5_ENABLE, 1); } void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, -- cgit From 01290a5766e1be38ee9f114d261bee263b168ab4 Mon Sep 17 00:00:00 2001 From: Joseph Gravenor Date: Thu, 7 Nov 2019 19:20:00 -0500 Subject: drm/amd/display: populate bios integrated info for renoir [Why] When video_memory_type bw_params->vram_type is assigned, wedistinguish between Ddr4MemType and LpDdr4MemType. 
Because of this we will never report that we are using LpDdr4MemType and never re-purpose WM set D [How] populate bios integrated info for renoir by adding the revision number for renoir and use that integrated info table instead of of asic_id to get the vram type Signed-off-by: Joseph Gravenor Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 1 + drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 4e24717ee87b..2a74be731faa 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1638,6 +1638,7 @@ static enum bp_result construct_integrated_info( /* Don't need to check major revision as they are all 1 */ switch (revision.minor) { case 11: + case 12: result = get_integrated_info_v11(bp, info); break; default: diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 841095d09d3c..9f0381c68844 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -569,7 +569,7 @@ static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsi return 0; } -static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) +static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info) { int i, j = 0; @@ -601,8 +601,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol); } - bw_params->vram_type = asic_id->vram_type; - bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH; + bw_params->vram_type = bios_info->memory_type; + bw_params->num_channels = bios_info->ma_channel_number; for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; @@ -685,7 +685,9 @@ void rn_clk_mgr_construct( if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); - rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); + if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); + } } if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { -- cgit From 8fb3a6363684b5f00e7b438e22fc17346711f4fa Mon Sep 17 00:00:00 2001 From: David Galiffi Date: Thu, 7 Nov 2019 17:18:20 -0500 Subject: drm/amd/display: Fixed kernel panic when booting with DP-to-HDMI dongle [Why] In dc_link_is_dp_sink_present, if dal_ddc_open fails, then dal_gpio_destroy_ddc is called, destroying pin_data and pin_clock. They are created only on dc_construct, and next aux access will cause a panic. [How] Instead of calling dal_gpio_destroy_ddc, call dal_ddc_close. 
Signed-off-by: David Galiffi Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4681ca20f683..cef8c1ba9797 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -372,7 +372,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) if (GPIO_RESULT_OK != dal_ddc_open( ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { - dal_gpio_destroy_ddc(&ddc); + dal_ddc_close(ddc); return present; } -- cgit From 639dcfc6feb5c7451d1382c5d3fa1fd679eca544 Mon Sep 17 00:00:00 2001 From: Joseph Gravenor Date: Fri, 8 Nov 2019 14:30:34 -0500 Subject: drm/amd/display: have two different sr and pstate latency tables for renoir [Why] new sr and pstate latencies are optimized for the case when we are not using lpddr4 memory [How] have two different wm tables, one for the lpddr case and one for non lpddr case Signed-off-by: Joseph Gravenor Reviewed-by: Eric Yang Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 114 +++++++++++++++------ 1 file changed, 80 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 9f0381c68844..89ed230cdb26 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -518,44 +518,83 @@ struct clk_bw_params rn_bw_params = { .num_entries = 4, }, - .wm_table = { - .entries = { - { - .wm_inst = WM_A, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, - .sr_exit_time_us = 6.09, - .sr_enter_plus_exit_time_us = 7.14, - .valid = true, - }, - { - .wm_inst = WM_B, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, - .valid = true, - }, - { - .wm_inst = WM_C, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, - .valid = true, - }, - { - .wm_inst = WM_D, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, - .valid = true, - }, +}; + +struct wm_table ddr4_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 6.09, + .sr_enter_plus_exit_time_us = 7.14, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, }, } }; +struct wm_table lpddr4_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + 
.pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + } +}; + + static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) { int i; @@ -677,10 +716,17 @@ void rn_clk_mgr_construct( ASSERT(clk_mgr->base.dprefclk_khz == 600000); clk_mgr->base.dprefclk_khz = 600000; } + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { + rn_bw_params.wm_table = lpddr4_wm_table; + } else { + rn_bw_params.wm_table = ddr4_wm_table; + } } dce_clock_read_ss_info(clk_mgr); + clk_mgr->base.bw_params = &rn_bw_params; if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { -- cgit From e6d9a93c141e0bcc3a40671b2338c093d04ab14b Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Sun, 10 Nov 2019 12:08:02 -0500 Subject: drm/amd/display: fix dprefclk and ss percentage reading on RN [Why] Before was using HW counter value to determine the dprefclk. Which take into account ss, but has large variation, not good enough for generating audio dto. Also, the bios parser code to get the ss percentage was not working. [How] After this change, dprefclk is hard coded, same as on RV. We don't expect this to change on Renoir. Modified bios parser code to get the right ss percentage. Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 1 + .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 16 +++------------- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 1 + 3 files changed, 5 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 2a74be731faa..2f1c9584ac32 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -834,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info( case 1: return get_ss_info_v4_1(bp, signal, index, ss_info); case 2: + case 3: return get_ss_info_v4_2(bp, signal, index, ss_info); default: break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 89ed230cdb26..307c8540e36f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -675,7 +675,6 @@ void rn_clk_mgr_construct( { struct dc_debug_options *debug = &ctx->dc->debug; struct dpm_clocks clock_table = { 0 }; - struct clk_state_registers_and_bypass s = { 0 }; clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn21_funcs; @@ -695,7 +694,6 @@ void rn_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; clk_mgr->base.dentist_vco_freq_khz = 3600000; - clk_mgr->base.dprefclk_khz = 600000; } else { struct clk_log_info log_info = {0}; @@ -706,24 +704,16 @@ void rn_clk_mgr_construct( if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3600000; - rn_dump_clk_registers(&s, &clk_mgr->base, &log_info); - /* Convert dprefclk units from MHz to KHz */ 
- /* Value already divided by 10, some resolution lost */ - clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dprefclk_khz == 0) { - ASSERT(clk_mgr->base.dprefclk_khz == 600000); - clk_mgr->base.dprefclk_khz = 600000; - } - if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { rn_bw_params.wm_table = lpddr4_wm_table; } else { rn_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); } + clk_mgr->base.dprefclk_khz = 600000; dce_clock_read_ss_info(clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 4aa09fe954c5..ac530c057ddd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -191,6 +191,7 @@ struct clk_mgr { bool psr_allow_active_cache; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes int dentist_vco_freq_khz; + struct clk_state_registers_and_bypass boot_snapshot; struct clk_bw_params *bw_params; }; -- cgit From 728a5068239d0f50b4346089e30ba744b5954288 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 11 Nov 2019 10:07:50 -0500 Subject: drm/amd/display: 3.2.61 Signed-off-by: Aric Cyr Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3e6133f8cdc4..34b824270c84 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.60" +#define DC_VER "3.2.61" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From fa11d3c9425354c4b47e40f34d29c5b0949fe4ce Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Thu, 7 Nov 2019 16:30:04 -0500 Subject: drm/amd/display: Change the delay time before enabling FEC [why] DP spec requires 1000 symbols delay between the end of link training and enabling FEC in the stream. Currently we are using 1 miliseconds delay which is not accurate. [how] One lane RBR should have the maximum time for transmitting 1000 LL codes which is 6.173 us. So using 7 microseconds delay instead of 1 miliseconds. Signed-off-by: Leo (Hanghong) Ma Reviewed-by: Harry Wentland Reviewed-by: Nikola Cornij Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 537b4dee8f22..b10019106030 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -3951,7 +3951,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) if (link_enc->funcs->fec_set_enable && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { if (link->fec_state == dc_link_fec_ready && enable) { - msleep(1); + /* Accord to DP spec, FEC enable sequence can first + * be transmitted anytime after 1000 LL codes have + * been transmitted on the link after link training + * completion. 
Using 1 lane RBR should have the maximum + * time for transmitting 1000 LL codes which is 6.173 us. + * So use 7 microseconds delay instead. + */ + udelay(7); link_enc->funcs->fec_set_enable(link_enc, true); link->fec_state = dc_link_fec_enabled; } else if (link->fec_state == dc_link_fec_enabled && !enable) { -- cgit From 769dbc16056769ed25904f4b7dfca35c0698c1cd Mon Sep 17 00:00:00 2001 From: Brandon Syu Date: Fri, 8 Nov 2019 11:26:06 +0800 Subject: drm/amd/display: fixed that I2C over AUX didn't read data issue [Why] The variable mismatch assignment error. [How] To use uint32_t replace it. Signed-off-by: Brandon Syu Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 2 +- drivers/gpu/drm/amd/display/include/i2caux_interface.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 3fc9752edfe0..c2c136b12184 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -589,7 +589,7 @@ bool dal_ddc_service_query_ddc_data( bool dal_ddc_submit_aux_command(struct ddc_service *ddc, struct aux_payload *payload) { - uint8_t retrieved = 0; + uint32_t retrieved = 0; bool ret = 0; if (!ddc) diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h index bb012cb1a9f5..c7fbb9c3ad6b 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h @@ -42,7 +42,7 @@ struct aux_payload { bool write; bool mot; uint32_t address; - uint8_t length; + uint32_t length; uint8_t *data; /* * used to return the reply type of the transaction -- cgit From 460adc6b699672f3a15c8d6175b8f0bcb2b3c50f Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Wed, 23 Oct 2019 17:16:51 -0400 Subject: drm/amd/display: add log for lttpr Signed-off-by: abdoulaye berthe Reviewed-by: Wenjing Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 125 +++++++++++++++++------ 1 file changed, 93 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b10019106030..486c14e0cd41 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -255,11 +255,18 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; - DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", - __func__, - dpcd_base_lt_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } /***************************************************************** * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set *****************************************************************/ @@ -289,14 +296,25 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_lane, size_in_bytes); - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE 
set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - dpcd_base_lt_offset, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); - + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is * causing issues on some eDP panels (EPR#366724) @@ -544,23 +562,42 @@ static void get_lane_status_and_drive_settings( ln_status_updated->raw = dpcd_buf[2]; - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", - __func__, - lane01_status_address, dpcd_buf[0], - lane01_status_address + 1, dpcd_buf[1]); - + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + offset, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + } lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; if (is_repeater(link, offset)) lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", - __func__, - lane01_adjust_address, - dpcd_buf[lane_adjust_offset], - lane01_adjust_address + 1, - dpcd_buf[lane_adjust_offset + 1]); + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + offset, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } /*copy to req_settings*/ request_settings.link_settings.lane_count = @@ -656,14 +693,26 @@ static void dpcd_set_lane_settings( } */ - DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - lane0_set_address, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + lane0_set_address, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + 
dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + lane0_set_address, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } link->cur_lane_setting = link_training_setting->lane_settings[0]; } @@ -1170,12 +1219,16 @@ static void configure_lttpr_mode(struct dc_link *link) uint8_t repeater_id; uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); if (!link->is_lttpr_mode_transparent) { + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, @@ -1212,8 +1265,9 @@ static void repeater_training_done(struct dc_link *link, uint32_t offset) &dpcd_pattern.raw, 1); - DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", __func__, + offset, dpcd_base_lt_offset, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } @@ -1663,6 +1717,11 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + + DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", + __func__, + max_link_cap.lane_count, + max_link_cap.link_rate); } return max_link_cap; } @@ -3196,6 +3255,8 @@ static bool retrieve_link_cap(struct dc_link *link) link->is_lttpr_mode_transparent = true; dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); } + + CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); } { -- cgit From 1ba2a48305715f5832fac023ddc0d4ceba7a8491 Mon Sep 17 00:00:00 2001 From: Michael Strauss Date: Sun, 10 Nov 2019 15:22:15 -0500 Subject: drm/amd/display: Disable chroma viewport w/a when rotated 180 degrees [WHY] Previous Renoir chroma viewport workaround fixed an MPO flicker by increasing the chroma viewport size. However, when the MPO plane is rotated 180 degrees, the viewport is read in reverse. Since the workaround increases viewport size, when reading in reverse it causes a vertical chroma offset. 
[HOW] Pass rotation value to viewport set functions Temporarily disable the chroma viewport w/a when hubp is rotated 180 degrees Signed-off-by: Michael Strauss Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 4 +++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 7 +++++-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 4 +++- 6 files changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 31b64733d693..4d1301e5eaf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -810,7 +810,8 @@ static void hubp1_set_vm_context0_settings(struct hubp *hubp, void min_set_viewport( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c) + const struct rect *viewport_c, + enum dc_rotation_angle rotation) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 780af5b3c16f..e44eaae5033b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -749,7 +749,9 @@ void hubp1_set_blank(struct hubp *hubp, bool blank); void min_set_viewport(struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c); + const struct rect *viewport_c, + enum dc_rotation_angle rotation); +/* rotation angle added for use by hubp21_set_viewport */ void hubp1_clk_cntl(struct hubp *hubp, bool enable); void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 2b3081ee0e07..2440e28493e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2286,7 +2286,8 @@ static void dcn10_update_dchubp_dpp( hubp->funcs->mem_program_viewport( hubp, &pipe_ctx->plane_res.scl_data.viewport, - &pipe_ctx->plane_res.scl_data.viewport_c); + &pipe_ctx->plane_res.scl_data.viewport_c, + plane_state->rotation); } if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 619af3b3029d..392542f84ca2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1382,7 +1382,8 @@ static void dcn20_update_dchubp_dpp( hubp->funcs->mem_program_viewport( hubp, &pipe_ctx->plane_res.scl_data.viewport, - &pipe_ctx->plane_res.scl_data.viewport_c); + &pipe_ctx->plane_res.scl_data.viewport_c, + plane_state->rotation); /* Any updates are handled in dc interface, just need to apply existing for plane enable */ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 4408aed5087b..38661b9c61f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -169,7 +169,8 @@ static void 
hubp21_setup( void hubp21_set_viewport( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c) + const struct rect *viewport_c, + enum dc_rotation_angle rotation) { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); int patched_viewport_height = 0; @@ -196,9 +197,11 @@ void hubp21_set_viewport( * Work around for underflow issue with NV12 + rIOMMU translation * + immediate flip. This will cause hubp underflow, but will not * be user visible since underflow is in blank region + * Disable w/a when rotated 180 degrees, causes vertical chroma offset */ patched_viewport_height = viewport_c->height; - if (viewport_c->height != 0 && debug->nv12_iflip_vm_wa) { + if (viewport_c->height != 0 && debug->nv12_iflip_vm_wa && + rotation != ROTATION_ANGLE_180) { int pte_row_height = 0; int pte_rows = 0; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 9793da0f3c7e..85a34dde8526 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -82,7 +82,9 @@ struct hubp_funcs { void (*mem_program_viewport)( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c); + const struct rect *viewport_c, + enum dc_rotation_angle rotation); + /* rotation needed for Renoir workaround */ bool (*hubp_program_surface_flip_and_addr)( struct hubp *hubp, -- cgit From 084e1de7b938a00608b36a032c59bf987ff91667 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Fri, 8 Nov 2019 16:20:36 -0500 Subject: drm/amd/display: fix dml20 min_dst_y_next_start calculation Bring this calculation in line with HW programming guide. Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 2c7455e22a65..9df24ececcec 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal; dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start; - disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start - + min_dst_y_ttu_vblank) * dml_pow(2, 2)); + disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2)); ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18)); dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n", -- cgit From 949ffc8b3631ea0192a298109fa583b2f3a39c8f Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Mon, 11 Nov 2019 18:03:59 -0500 Subject: drm/amd/display: Reset steer fifo before unblanking the stream [why] During mode transition steer fifo could overflow. Quite often it recovers by itself, but sometimes it doesn't. [how] Add steer fifo reset before unblanking the stream. Also add a short delay when resetting dig resync fifo to make sure register writes don't end up back-to-back, in which case the HW might miss the reset request. 
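Spelled out in order, with the register and field names used in the hunk below, the reworked unblank path in enc2_stream_encoder_dp_unblank() does:

	/* 1. Make sure the stream is actually stopped before touching FIFOs. */
	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);

	/* 2. Reset the dig resync FIFO; the udelay() keeps the two writes from
	 *    landing back-to-back, which the HW can miss.
	 */
	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
	udelay(1);
	REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);

	/* 3. Reset the steer FIFO before switching the encoder to CRTC data,
	 *    since it can overflow during the mode transition and does not
	 *    always recover on its own.
	 */
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
	udelay(10);
	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);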
Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index be0978401476..9b70a1e7b962 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -488,15 +488,23 @@ void enc2_stream_encoder_dp_unblank( DP_VID_N_MUL, n_multiply); } - /* set DIG_START to 0x1 to reset FIFO */ + /* make sure stream is disabled before resetting steer fifo */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); + REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); + /* set DIG_START to 0x1 to reset FIFO */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + udelay(1); /* write 0 to take the FIFO out of reset */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); - /* switch DP encoder to CRTC data */ + /* switch DP encoder to CRTC data, but reset it the fifo first. It may happen + * that it overflows during mode transition, and sometimes doesn't recover. + */ + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); + udelay(10); REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); -- cgit From 2853ecc6610af9e40b6686498330b64a780cb181 Mon Sep 17 00:00:00 2001 From: Reza Amini Date: Thu, 7 Nov 2019 10:10:45 -0500 Subject: drm/amd/display: Implement DePQ for DCN1 [Why] Need support for more color management in 10bit surface. [How] Provide support for DePQ for 10bit surface Signed-off-by: Reza Amini Reviewed-by: Krunoslav Kovac Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 3 ++ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 +++ .../drm/amd/display/modules/color/color_gamma.c | 39 +++++++++++++++++----- 3 files changed, 38 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 6b7593dd0c77..935c892622a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -628,6 +628,9 @@ void dpp1_set_degamma( case IPP_DEGAMMA_MODE_HW_xvYCC: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; default: BREAK_TO_DEBUGGER(); break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 2440e28493e7..9551fefb9d1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1465,6 +1465,11 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); break; case TRANSFER_FUNCTION_PQ: + dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); + cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); + dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); + result = true; + break; default: result = false; break; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 
9b121b08c806..b52c4d379651 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -154,6 +154,7 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y) struct fixed31_32 l_pow_m1; struct fixed31_32 base, div; + struct fixed31_32 base2; if (dc_fixpt_lt(in_x, dc_fixpt_zero)) @@ -163,13 +164,15 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y) dc_fixpt_div(dc_fixpt_one, m2)); base = dc_fixpt_sub(l_pow_m1, c1); - if (dc_fixpt_lt(base, dc_fixpt_zero)) - base = dc_fixpt_zero; - div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1)); - *out_y = dc_fixpt_pow(dc_fixpt_div(base, div), - dc_fixpt_div(dc_fixpt_one, m1)); + base2 = dc_fixpt_div(base, div); + //avoid complex numbers + if (dc_fixpt_lt(base2, dc_fixpt_zero)) + base2 = dc_fixpt_sub(dc_fixpt_zero, base2); + + + *out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1)); } @@ -1998,10 +2001,28 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, tf_pts->x_point_at_y1_green = 1; tf_pts->x_point_at_y1_blue = 1; - map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axis_x, curve, - MAX_HW_POINTS, tf_pts, - mapUserRamp && ramp && ramp->type == GAMMA_RGB_256); + if (input_tf->tf == TRANSFER_FUNCTION_PQ) { + /* just copy current rgb_regamma into tf_pts */ + struct pwl_float_data_ex *curvePt = curve; + int i = 0; + + while (i <= MAX_HW_POINTS) { + tf_pts->red[i] = curvePt->r; + tf_pts->green[i] = curvePt->g; + tf_pts->blue[i] = curvePt->b; + ++curvePt; + ++i; + } + } else { + //clamps to 0-1 + map_regamma_hw_to_x_user(ramp, coeff, rgb_user, + coordinates_x, axis_x, curve, + MAX_HW_POINTS, tf_pts, + mapUserRamp && ramp && ramp->type == GAMMA_RGB_256); + } + + + if (ramp->type == GAMMA_CUSTOM) apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); -- cgit From 89cd5a02dfab35e0393fb04e936a427eea9f8d03 Mon Sep 17 00:00:00 2001 From: Joseph Gravenor Date: Tue, 12 Nov 2019 15:36:57 -0500 Subject: drm/amd/display: update p-state latency for renoir when using lpddr4 [Why] DF team has produced more optimized latency numbers, for lpddr4 [How] change the p-state laency in the lpddr4 wm table to the new latency number Signed-off-by: Joseph Gravenor Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 307c8540e36f..901e7035bf8e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -562,7 +562,7 @@ struct wm_table lpddr4_wm_table = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.65333, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 17.0, .valid = true, @@ -570,7 +570,7 @@ struct wm_table lpddr4_wm_table = { { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.65333, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 17.0, .valid = true, @@ -578,7 +578,7 @@ struct wm_table lpddr4_wm_table = { { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.65333, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 17.0, .valid = true, @@ -586,7 +586,7 @@ 
struct wm_table lpddr4_wm_table = { { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, + .pstate_latency_us = 11.65333, .sr_exit_time_us = 12.5, .sr_enter_plus_exit_time_us = 17.0, .valid = true, -- cgit From 8d623f86c07fce9b01e42b503df82bbabfb1621d Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Wed, 13 Nov 2019 14:04:56 -0500 Subject: drm/amd/display: add DP protocol version [Why] We want to know DP protocol version [How] In DC create we initialize a cap to indicate the max DP protocol version supported Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 ++ drivers/gpu/drm/amd/display/dc/dc.h | 5 +++++ 2 files changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 061e8adf7476..55f22a1c0aa5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -809,6 +809,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) dc->caps.max_audios = dc->res_pool->audio_count; dc->caps.linear_pitch_alignment = 64; + dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + /* Populate versioning information */ dc->versions.dc_ver = DC_VER; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 34b824270c84..4c7a2882a512 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -54,6 +54,10 @@ struct dc_versions { struct dmcu_version dmcu_version; }; +enum dp_protocol_version { + DP_VERSION_1_4, +}; + enum dc_plane_type { DC_PLANE_TYPE_INVALID, DC_PLANE_TYPE_DCE_RGB, @@ -114,6 +118,7 @@ struct dc_caps { bool extended_aux_timeout_support; bool dmcub_support; bool hw_3d_lut; + enum dp_protocol_version max_dp_protocol_version; struct dc_plane_cap planes[MAX_PLANES]; }; -- cgit From 99218d122a2b329c179cc6917836068da3c3e1ad Mon Sep 17 00:00:00 2001 From: Hugo Hu Date: Wed, 13 Nov 2019 16:18:09 -0500 Subject: drm/amd/display: Save/restore link setting for disable phy when link retraining [Why] The link setting will be modify after disable phy and due to DP Compliance Fails. [How] Save and resotre link setting for disable link phy when link retraining. 
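In sketch form (names taken from the hunk that follows), the handling in dc_link_handle_hpd_rx_irq() becomes:

	struct dc_link_settings previous_link_settings;

	/* Snapshot the settings in use before the PHY is disabled, since the
	 * disable/retrain path can modify link->cur_link_settings.
	 */
	previous_link_settings = link->cur_link_settings;
	dp_disable_link_phy(link, pipe_ctx->stream->signal);

	/* Retrain with the saved settings, not whatever the disable path
	 * left behind in cur_link_settings.
	 */
	perform_link_training_with_retries(&previous_link_settings,
			true, LINK_TRAINING_ATTEMPTS,
			pipe_ctx, pipe_ctx->stream->signal);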
Signed-off-by: Hugo Hu Reviewed-by: Wenjing Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 486c14e0cd41..015fa0c52746 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2788,9 +2788,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } }; union device_service_irq device_service_clear = { { 0 } }; enum dc_status result; - bool status = false; struct pipe_ctx *pipe_ctx; + struct dc_link_settings previous_link_settings; int i; if (out_link_loss) @@ -2873,9 +2873,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd if (pipe_ctx == NULL || pipe_ctx->stream == NULL) return false; + previous_link_settings = link->cur_link_settings; dp_disable_link_phy(link, pipe_ctx->stream->signal); - perform_link_training_with_retries(&link->cur_link_settings, + perform_link_training_with_retries(&previous_link_settings, true, LINK_TRAINING_ATTEMPTS, pipe_ctx, pipe_ctx->stream->signal); -- cgit From 75441d9d35f71123933eafa4dd55459403582add Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Tue, 12 Nov 2019 13:58:32 -0500 Subject: drm/amd/display: Return a correct error value [why] The function is expected to return instance of the timing generator therefore we shouldn't be returning boolean in integer function, and we shouldn't be returning zero so changing it to -1. Signed-off-by: Mikita Lipski Reviewed-by: Martin Leung Acked-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index a9412720c860..0c19de678339 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1866,7 +1866,7 @@ static int acquire_resource_from_hw_enabled_state( inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (inst == ENGINE_ID_UNKNOWN) - return false; + return -1; for (i = 0; i < pool->stream_enc_count; i++) { if (pool->stream_enc[i]->id == inst) { @@ -1878,10 +1878,10 @@ static int acquire_resource_from_hw_enabled_state( // tg_inst not found if (i == pool->stream_enc_count) - return false; + return -1; if (tg_inst >= pool->timing_generator_count) - return false; + return -1; if (!res_ctx->pipe_ctx[tg_inst].stream) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst]; -- cgit From d4bbcecb596296834aeafb93008474671c7988cf Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 12 Nov 2019 15:33:37 -0500 Subject: drm/amd/display: Split DMUB cmd type into type/subtype [Why] Commands will be considered a stable ABI between driver and firmware. Commands are also split between DC commands, DAL feature commands, and VBIOS commands. Commands are currently not designated to a specific ID and the enum does not provide a stable ABI. We currently group all of these into a single command type of 8-bits. With the stable ABI consideration in mind it's not unreasonable to run out of command IDs. 
For cleaner separation and versioning split the commands into a main type and a subtype. [How] For commands where performance matters (like reg sequences) these are still considered main commands. Sub commands will be split by ownership/feature. Update existing command sequences to reflect new changes. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/bios/command_table2.c | 13 ++++-- drivers/gpu/drm/amd/display/dc/dc_helper.c | 3 ++ drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 48 ++++++++-------------- .../gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h | 41 ++++++++++++++++++ .../gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h | 41 ++++++++++++++++++ 5 files changed, 112 insertions(+), 34 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 1836f16bb7fe..2cb7a4288cb7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -111,7 +111,8 @@ static void encoder_control_dmcub( { struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 }; - encoder_control.header.type = DMUB_CMD__DIGX_ENCODER_CONTROL; + encoder_control.header.type = DMUB_CMD__VBIOS; + encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL; encoder_control.encoder_control.dig.stream_param = *dig; dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header); @@ -219,7 +220,9 @@ static void transmitter_control_dmcub( { struct dmub_rb_cmd_dig1_transmitter_control transmitter_control; - transmitter_control.header.type = DMUB_CMD__DIG1_TRANSMITTER_CONTROL; + transmitter_control.header.type = DMUB_CMD__VBIOS; + transmitter_control.header.sub_type = + DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL; transmitter_control.transmitter_control.dig = *dig; dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header); @@ -302,7 +305,8 @@ static void set_pixel_clock_dmcub( { struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 }; - pixel_clock.header.type = DMUB_CMD__SET_PIXEL_CLOCK; + pixel_clock.header.type = DMUB_CMD__VBIOS; + pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK; pixel_clock.pixel_clock.clk = *clk; dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header); @@ -650,7 +654,8 @@ static void enable_disp_power_gating_dmcub( { struct dmub_rb_cmd_enable_disp_power_gating power_gating; - power_gating.header.type = DMUB_CMD__ENABLE_DISP_POWER_GATING; + power_gating.header.type = DMUB_CMD__VBIOS; + power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; power_gating.power_gating.pwr = *pwr; dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index e41befa067ce..02a63e9cb62f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -178,6 +178,7 @@ static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t } cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE; + cmd_buf->header.sub_type = 0; cmd_buf->addr = addr; cmd_buf->write_values[offload->reg_seq_count] = reg_val; offload->reg_seq_count++; @@ -206,6 +207,7 @@ static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr, /* pack commands */ 
cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE; + cmd_buf->header.sub_type = 0; seq = &cmd_buf->seq[offload->reg_seq_count]; if (offload->reg_seq_count) { @@ -230,6 +232,7 @@ static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr, struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT; + cmd_buf->header.sub_type = 0; cmd_buf->reg_wait.addr = addr; cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift); cmd_buf->reg_wait.mask = mask; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 43f1cd647aab..b10728f33f62 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -27,6 +27,8 @@ #define _DMUB_CMD_H_ #include "dmub_types.h" +#include "dmub_cmd_dal.h" +#include "dmub_cmd_vbios.h" #include "atomfirmware.h" #define DMUB_RB_CMD_SIZE 64 @@ -34,43 +36,29 @@ #define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY) #define REG_SET_MASK 0xFFFF +/* + * Command IDs should be treated as stable ABI. + * Do not reuse or modify IDs. + */ + enum dmub_cmd_type { - DMUB_CMD__NULL, - DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE, - DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ, - DMUB_CMD__REG_SEQ_BURST_WRITE, - DMUB_CMD__REG_REG_WAIT, - DMUB_CMD__DIGX_ENCODER_CONTROL, - DMUB_CMD__SET_PIXEL_CLOCK, - DMUB_CMD__ENABLE_DISP_POWER_GATING, - DMUB_CMD__DPPHY_INIT, - DMUB_CMD__DIG1_TRANSMITTER_CONTROL, - DMUB_CMD__SETUP_DISPLAY_MODE, - DMUB_CMD__BLANK_CRTC, - DMUB_CMD__ENABLE_DISPPATH, - DMUB_CMD__DISABLE_DISPPATH, - DMUB_CMD__DISABLE_DISPPATH_OUTPUT, - DMUB_CMD__READ_DISPPATH_EDID, - DMUB_CMD__DP_PRE_LINKTRAINING, - DMUB_CMD__INIT_CONTROLLER, - DMUB_CMD__RESET_CONTROLLER, - DMUB_CMD__SET_BRI_LEVEL, - DMUB_CMD__LVTMA_CONTROL, - - // PSR - DMUB_CMD__PSR_ENABLE, - DMUB_CMD__PSR_DISABLE, - DMUB_CMD__PSR_COPY_SETTINGS, - DMUB_CMD__PSR_SET_LEVEL, + DMUB_CMD__NULL = 0, + DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE = 1, + DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2, + DMUB_CMD__REG_SEQ_BURST_WRITE = 3, + DMUB_CMD__REG_REG_WAIT = 4, + DMUB_CMD__PSR = 64, + DMUB_CMD__VBIOS = 128, }; #pragma pack(push, 1) struct dmub_cmd_header { - enum dmub_cmd_type type : 8; - unsigned int reserved0 : 16; + unsigned int type : 8; + unsigned int sub_type : 8; + unsigned int reserved0 : 8; unsigned int payload_bytes : 6; /* up to 60 bytes */ - unsigned int reserved : 2; + unsigned int reserved1 : 2; }; /* diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h new file mode 100644 index 000000000000..14f13e8a6f3b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_CMD_DAL_H_ +#define _DMUB_CMD_DAL_H_ + +/* + * Command IDs should be treated as stable ABI. + * Do not reuse or modify IDs. + */ + +enum dmub_cmd_psr_type { + DMUB_CMD__PSR_ENABLE = 0, + DMUB_CMD__PSR_DISABLE = 1, + DMUB_CMD__PSR_COPY_SETTINGS = 2, + DMUB_CMD__PSR_SET_LEVEL = 3, +}; + +#endif /* _DMUB_CMD_DAL_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h new file mode 100644 index 000000000000..b6deb8e2590f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_CMD_VBIOS_H_ +#define _DMUB_CMD_VBIOS_H_ + +/* + * Command IDs should be treated as stable ABI. + * Do not reuse or modify IDs. + */ + +enum dmub_cmd_vbios_type { + DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0, + DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1, + DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2, + DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3, +}; + +#endif /* _DMUB_CMD_VBIOS_H_ */ -- cgit From 2f39835cc35033672ace41f32d653dbf2c0c8132 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 12 Nov 2019 13:46:34 -0500 Subject: drm/amd/display: Add shared DMCUB/driver firmware state cache window [Why] Scratch registers are limited on the DMCUB and we have an expanding list of state to track between driver and DMCUB. [How] Place shared state in cache window 6. The cache window size is aligned to the size of the cache line on the DMCUB to make it easy to invalidate. The shared state is intended to be read only from driver side so it's been marked as const. The use of volatile is intentional. The memory for the shared firmware state is memory mapped from the framebuffer memory. The DMCUB will flush its cache after modifying the region. 
There's no way for x86 to known whether this data is stale or not so we want to intentionally disable optimization to force the read at every access. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dmub/inc/dmub_fw_state.h | 73 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 8 ++- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 10 ++- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 3 +- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 12 +++- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h | 3 +- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 27 ++++++-- 7 files changed, 125 insertions(+), 11 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h new file mode 100644 index 000000000000..c87b1ba7590e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h @@ -0,0 +1,73 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_FW_STATE_H_ +#define _DMUB_FW_STATE_H_ + +#include "dmub_types.h" + +#pragma pack(push, 1) + +struct dmub_fw_state { + /** + * @phy_initialized_during_fw_boot: + * + * Detects if VBIOS/VBL has ran before firmware boot. + * A value of 1 will usually mean S0i3 boot. + */ + uint8_t phy_initialized_during_fw_boot; + + /** + * @intialized_phy: + * + * Bit vector of initialized PHY. + */ + uint8_t initialized_phy; + + /** + * @enabled_phy: + * + * Bit vector of enabled PHY for DP alt mode switch tracking. + */ + uint8_t enabled_phy; + + /** + * @dmcu_fw_loaded: + * + * DMCU auto load state. + */ + uint8_t dmcu_fw_loaded; + + /** + * @psr_state: + * + * PSR state tracking. 
+ */ + uint8_t psr_state; +}; + +#pragma pack(pop) + +#endif /* _DMUB_FW_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index fdedbe15e026..528243e35add 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -67,6 +67,7 @@ #include "dmub_types.h" #include "dmub_cmd.h" #include "dmub_rb.h" +#include "dmub_fw_state.h" #if defined(__cplusplus) extern "C" { @@ -102,7 +103,7 @@ enum dmub_window_id { DMUB_WINDOW_3_VBIOS, DMUB_WINDOW_4_MAILBOX, DMUB_WINDOW_5_TRACEBUFF, - DMUB_WINDOW_6_RESERVED, + DMUB_WINDOW_6_FW_STATE, DMUB_WINDOW_7_RESERVED, DMUB_WINDOW_TOTAL, }; @@ -241,7 +242,8 @@ struct dmub_srv_hw_funcs { const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, - const struct dmub_window *cw5); + const struct dmub_window *cw5, + const struct dmub_window *cw6); void (*setup_mailbox)(struct dmub_srv *dmub, const struct dmub_region *inbox1); @@ -296,11 +298,13 @@ struct dmub_srv_hw_params { * @asic: dmub asic identifier * @user_ctx: user provided context for the dmub_srv * @is_virtual: false if hardware support only + * @fw_state: dmub firmware state pointer */ struct dmub_srv { enum dmub_asic asic; void *user_ctx; bool is_virtual; + volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ struct dmub_srv_base_funcs funcs; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 302dd3d4b77d..951ea7053c7e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -76,7 +76,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, - const struct dmub_window *cw5) + const struct dmub_window *cw5, + const struct dmub_window *cw6) { REG_WRITE(DMCUB_REGION3_CW2_OFFSET, cw2->offset.u.low_part); REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, cw2->offset.u.high_part); @@ -106,6 +107,13 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); + + REG_WRITE(DMCUB_REGION3_CW6_OFFSET, cw6->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, cw6->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); + REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, + DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, + DMCUB_REGION3_CW6_ENABLE, 1); } void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index ca7db03b94f7..e70a57573467 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -46,7 +46,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, - const struct dmub_window *cw5); + const struct dmub_window *cw5, + const struct dmub_window *cw6); void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c index b9dc2dd645eb..9cea7a2d8dbf 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c +++ 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -78,7 +78,8 @@ void dmub_dcn21_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, - const struct dmub_window *cw5) + const struct dmub_window *cw5, + const struct dmub_window *cw6) { union dmub_addr offset; uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; @@ -118,6 +119,15 @@ void dmub_dcn21_setup_windows(struct dmub_srv *dmub, REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); + + dmub_dcn21_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); + REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, + DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, + DMCUB_REGION3_CW6_ENABLE, 1); } bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h index 9e5f195e288f..f7a93a5dcfa5 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -38,7 +38,8 @@ void dmub_dcn21_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw2, const struct dmub_window *cw3, const struct dmub_window *cw4, - const struct dmub_window *cw5); + const struct dmub_window *cw5, + const struct dmub_window *cw6); bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 70c7a4be9ccc..5f39166d3c08 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -48,13 +48,14 @@ /* Number of windows in use. */ -#define DMUB_NUM_WINDOWS (DMUB_WINDOW_5_TRACEBUFF + 1) +#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1) /* Base addresses. */ #define DMUB_CW0_BASE (0x60000000) #define DMUB_CW1_BASE (0x61000000) #define DMUB_CW3_BASE (0x63000000) #define DMUB_CW5_BASE (0x65000000) +#define DMUB_CW6_BASE (0x66000000) static inline uint32_t dmub_align(uint32_t val, uint32_t factor) { @@ -158,6 +159,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS]; struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; + struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; if (!dmub->sw_init) return DMUB_STATUS_INVALID; @@ -184,7 +186,13 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, trace_buff->base = dmub_align(mail->top, 256); trace_buff->top = trace_buff->base + TRACE_BUF_SIZE; - out->fb_size = dmub_align(trace_buff->top, 4096); + fw_state->base = dmub_align(trace_buff->top, 256); + + /* Align firmware state to size of cache line. 
*/ + fw_state->top = + fw_state->base + dmub_align(sizeof(struct dmub_fw_state), 64); + + out->fb_size = dmub_align(fw_state->top, 4096); return DMUB_STATUS_OK; } @@ -258,9 +266,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS]; struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; + struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_rb_init_params rb_params; - struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5; + struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; struct dmub_region inbox1; if (!dmub->sw_init) @@ -286,7 +295,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, if (dmub->hw_funcs.reset) dmub->hw_funcs.reset(dmub); - if (inst_fb && data_fb && bios_fb && mail_fb) { + if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb && + fw_state_fb) { cw2.offset.quad_part = data_fb->gpu_addr; cw2.region.base = DMUB_CW0_BASE + inst_fb->size; cw2.region.top = cw2.region.base + data_fb->size; @@ -306,8 +316,15 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, cw5.region.base = DMUB_CW5_BASE; cw5.region.top = cw5.region.base + tracebuff_fb->size; + cw6.offset.quad_part = fw_state_fb->gpu_addr; + cw6.region.base = DMUB_CW6_BASE; + cw6.region.top = cw6.region.base + fw_state_fb->size; + + dmub->fw_state = fw_state_fb->cpu_addr; + if (dmub->hw_funcs.setup_windows) - dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5); + dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, + &cw5, &cw6); if (dmub->hw_funcs.setup_mailbox) dmub->hw_funcs.setup_mailbox(dmub, &inbox1); -- cgit From e0600a94343cc772b33705015cb8c05bac32bccb Mon Sep 17 00:00:00 2001 From: Joseph Gravenor Date: Tue, 12 Nov 2019 17:48:36 -0500 Subject: drm/amd/display: update sr latency for renoir when using lpddr4 [Why] DF team has produced more optimized sr latency numbers, for lpddr4 [How] change the sr laency in the lpddr4 wm table to the new latency number Signed-off-by: Joseph Gravenor Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 901e7035bf8e..37230d3d94a0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -563,32 +563,32 @@ struct wm_table lpddr4_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .sr_exit_time_us = 5.32, + .sr_enter_plus_exit_time_us = 6.38, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .sr_exit_time_us = 9.82, + .sr_enter_plus_exit_time_us = 11.196, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .sr_exit_time_us = 9.89, + .sr_enter_plus_exit_time_us = 11.24, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 12.5, - .sr_enter_plus_exit_time_us = 17.0, + .sr_exit_time_us = 9.748, + 
.sr_enter_plus_exit_time_us = 11.102, .valid = true, }, } -- cgit From 9a35b016b99fb39891102c17e65f0b63d1d90d94 Mon Sep 17 00:00:00 2001 From: Noah Abradjian Date: Wed, 13 Nov 2019 13:55:53 -0500 Subject: drm/amd/display: Remove flag check in mpcc update [Why] MPCC programming was being missed during certain split pipe enables due to full_update flag not being true. This caused a momentary flash on half the screen. After discussion, determined we should not have that flag check within update_mpcc, as it should always perform full programming when called. [How] Remove flag check. We call update_blending within insert_plane, so we do not need to replace its call from the if block. Signed-off-by: Noah Abradjian Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 392542f84ca2..3b8b9944d972 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2158,12 +2158,6 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) */ mpcc_id = hubp->inst; - /* If there is no full update, don't need to touch MPC tree*/ - if (!pipe_ctx->plane_state->update_flags.bits.full_update) { - mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - return; - } - /* check if this MPCC is already being used */ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); /* remove MPCC if being used */ -- cgit From 5fd21b394cfec3611a4ddf49ec61c8920c001899 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Tue, 12 Nov 2019 11:07:24 -0500 Subject: drm/amd/display: check for repeater when setting aux_rd_interval. [Why] When training with repeater the aux read interval must be set to repeater specific aux_red_interval. This value is always 100us for CR. [How] Check for repeater when setting the aux_rd_interval in channel equalization. Use the right offset in the aux_rd_interval array Signed-off-by: abdoulaye berthe Reviewed-by: Wenjing Liu Acked-by: George Shen Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 015fa0c52746..dfcd6421ee01 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -906,10 +906,10 @@ static enum link_training_result perform_channel_equalization_sequence( /* 3. wait for receiver to lock-on*/ wait_time_microsec = lt_settings->eq_pattern_time; - if (!link->is_lttpr_mode_transparent) + if (is_repeater(link, offset)) wait_time_microsec = translate_training_aux_read_interval( - link->dpcd_caps.lttpr_caps.aux_rd_interval[offset]); + link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); wait_for_training_aux_rd_interval( link, -- cgit From 8b0fbb368c96df747d958bfb0842da9842e6d0a3 Mon Sep 17 00:00:00 2001 From: Noah Abradjian Date: Wed, 13 Nov 2019 16:56:06 -0500 Subject: drm/amd/display: Modify logic for when to wait for mpcc idle [Why] I was advised that we may need to check for mpcc idle in more cases than just when opp_changed is true. Also, mpcc_inst is equal to pipe_idx, so remove for loop. 
[How] Remove opp_changed flag check and mpcc_inst loop. Signed-off-by: Noah Abradjian Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 3b8b9944d972..6a0d280bafba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1349,16 +1349,16 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.mpcc || plane_state->update_flags.bits.global_alpha_change || plane_state->update_flags.bits.per_pixel_alpha_change) { - /* Need mpcc to be idle if changing opp */ - if (pipe_ctx->update_flags.bits.opp_changed) { - struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; - int mpcc_inst; - - for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { - if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) - continue; + // MPCC inst is equal to pipe index in practice + int mpcc_inst = pipe_ctx->pipe_idx; + int opp_inst; + int opp_count = dc->res_pool->res_cap->num_opp; + + for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { + if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); - old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; + break; } } hws->funcs.update_mpcc(dc, pipe_ctx); -- cgit From 5c7b0f38522e702ce0143a9ef62908eb953808ac Mon Sep 17 00:00:00 2001 From: Noah Abradjian Date: Wed, 13 Nov 2019 17:06:40 -0500 Subject: drm/amd/display: Remove redundant call [Why] I was advised that we don't need this call of program_front_end, as earlier and later calls in the same sequence are sufficient. [How] Remove first call of program_front_end in dc_commit_state_no_check. Signed-off-by: Noah Abradjian Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 55f22a1c0aa5..39fe38cb39b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1167,8 +1167,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_status[i].plane_count, context); /* use new pipe config in new context */ } - if (dc->hwss.program_front_end_for_ctx) - dc->hwss.program_front_end_for_ctx(dc, context); /* Program hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { -- cgit From dcd65857a7815ef94735f73d01e0d0d7e1ff2090 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 13 Nov 2019 17:03:37 -0500 Subject: drm/amd/display: add dc dsc functions to return bpp range for pixel encoding [why] Need to support 6 bpp for 420 pixel encoding only. [how] Add a dc function to determine what bpp range can be supported for given pixel encoding. 
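[Illustration only, not part of the patch: a minimal sketch of how a caller might consume the new helper. The wrapper name pick_target_bpp and the preferred_bpp parameter are hypothetical; the helper's signature is taken from the diff below.]

static bool pick_target_bpp(enum dc_pixel_encoding pixel_enc,
			    uint32_t preferred_bpp, uint32_t *target_bpp)
{
	uint32_t min_bpp = 0, max_bpp = 0;

	/* Helper added by this patch; returns false when the encoding
	 * cannot be compressed with DSC. */
	if (!dc_dsc_get_bpp_range_for_pixel_encoding(pixel_enc,
						     &min_bpp, &max_bpp))
		return false;

	/* Clamp the caller's preference into the supported range. */
	if (preferred_bpp < min_bpp)
		*target_bpp = min_bpp;
	else if (preferred_bpp > max_bpp)
		*target_bpp = max_bpp;
	else
		*target_bpp = preferred_bpp;

	return true;
}
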
Signed-off-by: Wenjing Liu Reviewed-by: Nikola Cornij Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dsc.h | 8 ++++-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 38 +++++++++++++++++++++++------ 2 files changed, 37 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index cc9915e545cd..d98b89bad353 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -52,8 +52,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, bool dc_dsc_compute_bandwidth_range( const struct display_stream_compressor *dsc, const uint32_t dsc_min_slice_height_override, - const uint32_t min_kbps, - const uint32_t max_kbps, + const uint32_t min_bpp, + const uint32_t max_bpp, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range); @@ -65,4 +65,8 @@ bool dc_dsc_compute_config( uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); + +bool dc_dsc_get_bpp_range_for_pixel_encoding(enum dc_pixel_encoding pixel_enc, + uint32_t *min_bpp, + uint32_t *max_bpp); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index ec86ba73a039..febae6cc7295 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -31,16 +31,12 @@ struct dc_dsc_policy { bool use_min_slices_h; int max_slices_h; // Maximum available if 0 int min_sice_height; // Must not be less than 8 - int max_target_bpp; - int min_target_bpp; // Minimum target bits per pixel }; const struct dc_dsc_policy dsc_policy = { .use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock .max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode) .min_sice_height = 108, // DSC Policy: Use slice height recommended by VESA DSC Spreadsheet user guide - .max_target_bpp = 16, - .min_target_bpp = 8, }; @@ -374,7 +370,6 @@ static void get_dsc_bandwidth_range( * or if it couldn't be applied based on DSC policy. 
*/ static bool decide_dsc_target_bpp_x16( - const struct dc_dsc_policy *policy, const struct dsc_enc_caps *dsc_common_caps, const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, @@ -382,10 +377,13 @@ static bool decide_dsc_target_bpp_x16( { bool should_use_dsc = false; struct dc_dsc_bw_range range; + uint32_t min_target_bpp = 0; + uint32_t max_target_bpp = 0; memset(&range, 0, sizeof(range)); - get_dsc_bandwidth_range(policy->min_target_bpp, policy->max_target_bpp, + dc_dsc_get_bpp_range_for_pixel_encoding(timing->pixel_encoding, &min_target_bpp, &max_target_bpp); + get_dsc_bandwidth_range(min_target_bpp, max_target_bpp, dsc_common_caps, timing, &range); if (target_bandwidth_kbps >= range.stream_kbps) { /* enough bandwidth without dsc */ @@ -599,7 +597,7 @@ static bool setup_dsc_config( goto done; if (target_bandwidth_kbps > 0) { - is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_policy, &dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp); + is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } if (!is_dsc_possible) @@ -906,3 +904,29 @@ bool dc_dsc_compute_config( timing, dsc_min_slice_height_override, dsc_cfg); return is_dsc_possible; } + +bool dc_dsc_get_bpp_range_for_pixel_encoding(enum dc_pixel_encoding pixel_enc, + uint32_t *min_bpp, + uint32_t *max_bpp) +{ + bool result = true; + + switch (pixel_enc) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + case PIXEL_ENCODING_YCBCR422: + *min_bpp = 8; + *max_bpp = 16; + break; + case PIXEL_ENCODING_YCBCR420: + *min_bpp = 6; + *max_bpp = 16; + break; + default: + *min_bpp = 0; + *max_bpp = 0; + result = false; + } + + return result; +} -- cgit From c5e53707e2606dda260b27f5ded637f1f851a659 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 13 Nov 2019 15:59:51 -0500 Subject: drm/amd/display: remove spam DSC log [why] add_dsc_to_stream_resource could be called for validation. Failing validation is completely fine. However failing it inside commit streams is bad. This code could be triggered for both contexts. The function itself cannot distinguish the caller, which makes it impossible to output the log only in the meaningful case (commit streams). Signed-off-by: Wenjing Liu Reviewed-by: Nikola Cornij Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 322556c587b2..f7f1f58fa655 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1582,7 +1582,6 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { - dm_output_to_console("No DSCs available\n"); result = DC_NO_DSC_RESOURCE; } -- cgit From 799a5f74d11ef7c2e8d86b143d4f1b64793411b6 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Fri, 15 Nov 2019 11:24:54 -0500 Subject: drm/amd/display: add dsc policy getter dc needs to expose its internal dsc policy. 
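[Usage sketch, illustrative only: the getter fills a caller-provided struct dc_dsc_policy and leaves the target-bpp limits at 0 for color depths it does not handle. The wrapper below is hypothetical; the getter and struct fields come from the diff that follows.]

static uint32_t max_dsc_target_bpp(const struct dc_crtc_timing *timing)
{
	struct dc_dsc_policy policy = { 0 };

	/* Getter introduced by this patch: derives bpp limits and slicing
	 * policy from the timing's color depth and pixel encoding. */
	dc_dsc_get_policy_for_timing(timing, &policy);

	return policy.max_target_bpp; /* 0 means DSC not applicable */
}
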
Signed-off-by: Wenjing Liu Reviewed-by: Nikola Cornij Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dsc.h | 14 +++- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 103 +++++++++++++++++----------- 2 files changed, 75 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index d98b89bad353..8ec09813ee17 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -45,6 +45,14 @@ struct display_stream_compressor { int inst; }; +struct dc_dsc_policy { + bool use_min_slices_h; + int max_slices_h; // Maximum available if 0 + int min_slice_height; // Must not be less than 8 + uint32_t max_target_bpp; + uint32_t min_target_bpp; +}; + bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps); @@ -66,7 +74,7 @@ bool dc_dsc_compute_config( const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); -bool dc_dsc_get_bpp_range_for_pixel_encoding(enum dc_pixel_encoding pixel_enc, - uint32_t *min_bpp, - uint32_t *max_bpp); +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, + struct dc_dsc_policy *policy); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index febae6cc7295..d2423ad1fac2 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -27,19 +27,6 @@ #include #include "dc.h" -struct dc_dsc_policy { - bool use_min_slices_h; - int max_slices_h; // Maximum available if 0 - int min_sice_height; // Must not be less than 8 -}; - -const struct dc_dsc_policy dsc_policy = { - .use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock - .max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode) - .min_sice_height = 108, // DSC Policy: Use slice height recommended by VESA DSC Spreadsheet user guide -}; - - /* This module's internal functions */ static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( @@ -370,6 +357,7 @@ static void get_dsc_bandwidth_range( * or if it couldn't be applied based on DSC policy. 
*/ static bool decide_dsc_target_bpp_x16( + const struct dc_dsc_policy *policy, const struct dsc_enc_caps *dsc_common_caps, const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, @@ -377,13 +365,10 @@ static bool decide_dsc_target_bpp_x16( { bool should_use_dsc = false; struct dc_dsc_bw_range range; - uint32_t min_target_bpp = 0; - uint32_t max_target_bpp = 0; memset(&range, 0, sizeof(range)); - dc_dsc_get_bpp_range_for_pixel_encoding(timing->pixel_encoding, &min_target_bpp, &max_target_bpp); - get_dsc_bandwidth_range(min_target_bpp, max_target_bpp, + get_dsc_bandwidth_range(policy->min_target_bpp, policy->max_target_bpp, dsc_common_caps, timing, &range); if (target_bandwidth_kbps >= range.stream_kbps) { /* enough bandwidth without dsc */ @@ -579,9 +564,11 @@ static bool setup_dsc_config( bool is_dsc_possible = false; int pic_height; int slice_height; + struct dc_dsc_policy policy; memset(dsc_cfg, 0, sizeof(struct dc_dsc_config)); + dc_dsc_get_policy_for_timing(timing, &policy); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; @@ -597,7 +584,12 @@ static bool setup_dsc_config( goto done; if (target_bandwidth_kbps > 0) { - is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp); + is_dsc_possible = decide_dsc_target_bpp_x16( + &policy, + &dsc_common_caps, + target_bandwidth_kbps, + timing, + &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } if (!is_dsc_possible) @@ -699,20 +691,20 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - if (dsc_policy.use_min_slices_h) { + if (policy.use_min_slices_h) { if (min_slices_h > 0) num_slices_h = min_slices_h; else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out - if (dsc_policy.max_slices_h) - num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); + if (policy.max_slices_h) + num_slices_h = min(policy.max_slices_h, max_slices_h); else num_slices_h = max_slices_h; } else is_dsc_possible = false; } else { if (max_slices_h > 0) { - if (dsc_policy.max_slices_h) - num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); + if (policy.max_slices_h) + num_slices_h = min(policy.max_slices_h, max_slices_h); else num_slices_h = max_slices_h; } else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible @@ -734,7 +726,7 @@ static bool setup_dsc_config( // Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by. // For 4:2:0 make sure the slice height is divisible by 2 as well. 
if (min_slice_height_override == 0) - slice_height = min(dsc_policy.min_sice_height, pic_height); + slice_height = min(policy.min_slice_height, pic_height); else slice_height = min(min_slice_height_override, pic_height); @@ -905,28 +897,61 @@ bool dc_dsc_compute_config( return is_dsc_possible; } -bool dc_dsc_get_bpp_range_for_pixel_encoding(enum dc_pixel_encoding pixel_enc, - uint32_t *min_bpp, - uint32_t *max_bpp) +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy) { - bool result = true; + uint32_t bpc = 0; + + policy->min_target_bpp = 0; + policy->max_target_bpp = 0; + + /* DSC Policy: Use minimum number of slices that fits the pixel clock */ + policy->use_min_slices_h = true; - switch (pixel_enc) { + /* DSC Policy: Use max available slices + * (in our case 4 for or 8, depending on the mode) + */ + policy->max_slices_h = 0; + + /* DSC Policy: Use slice height recommended + * by VESA DSC Spreadsheet user guide + */ + policy->min_slice_height = 108; + + /* DSC Policy: follow DP specs with an internal upper limit to 16 bpp + * for better interoperability + */ + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + bpc = 8; + break; + case COLOR_DEPTH_101010: + bpc = 10; + break; + case COLOR_DEPTH_121212: + bpc = 12; + break; + default: + return; + } + switch (timing->pixel_encoding) { case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: - case PIXEL_ENCODING_YCBCR422: - *min_bpp = 8; - *max_bpp = 16; + case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */ + /* DP specs limits to 8 */ + policy->min_target_bpp = 8; + /* DP specs limits to 3 x bpc */ + policy->max_target_bpp = 3 * bpc; break; case PIXEL_ENCODING_YCBCR420: - *min_bpp = 6; - *max_bpp = 16; + /* DP specs limits to 6 */ + policy->min_target_bpp = 6; + /* DP specs limits to 1.5 x bpc assume bpc is an even number */ + policy->max_target_bpp = bpc * 3 / 2; break; default: - *min_bpp = 0; - *max_bpp = 0; - result = false; + return; } - - return result; + /* internal upper limit to 16 bpp */ + if (policy->max_target_bpp > 16) + policy->max_target_bpp = 16; } -- cgit From ddba76274fd59e664f4ea82ca45ea13d179ef179 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 15 Nov 2019 14:58:53 -0500 Subject: drm/amd/display: Limit NV12 chroma workaround [Why] It is causing green Line at the bottom of SDR 480p MPO playback [How] Limit workaround to vertical > 512 Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 38661b9c61f8..332bf3d3a664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -200,7 +200,7 @@ void hubp21_set_viewport( * Disable w/a when rotated 180 degrees, causes vertical chroma offset */ patched_viewport_height = viewport_c->height; - if (viewport_c->height != 0 && debug->nv12_iflip_vm_wa && + if (debug->nv12_iflip_vm_wa && viewport_c->height > 512 && rotation != ROTATION_ANGLE_180) { int pte_row_height = 0; int pte_rows = 0; -- cgit From 4ed79864de180ff2d39da26305e2a7e9c72bc38e Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sat, 9 Nov 2019 18:30:40 -0500 Subject: drm/amd/display: fix cursor positioning for multiplane cases [Why] Cursor position needs to take into account plane 
scaling as well. [How] Translate cursor coords from stream space to plane space. Signed-off-by: Aric Cyr Reviewed-by: Anthony Koo Acked-by: Leo Li Acked-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 33 ++++++++++++++++------ 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 9551fefb9d1d..61d2f1233f8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2913,15 +2913,30 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .rotation = pipe_ctx->plane_state->rotation, .mirror = pipe_ctx->plane_state->horizontal_mirror }; - uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x; - uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y; - uint32_t x_offset = min(x_plane, pos_cpy.x); - uint32_t y_offset = min(y_plane, pos_cpy.y); - - pos_cpy.x -= x_offset; - pos_cpy.y -= y_offset; - pos_cpy.x_hotspot += (x_plane - x_offset); - pos_cpy.y_hotspot += (y_plane - y_offset); + + int x_plane = pipe_ctx->plane_state->dst_rect.x; + int y_plane = pipe_ctx->plane_state->dst_rect.y; + int x_pos = pos_cpy.x; + int y_pos = pos_cpy.y; + + // translate cursor from stream space to plane space + x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width / + pipe_ctx->plane_state->dst_rect.width; + y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height / + pipe_ctx->plane_state->dst_rect.height; + + if (x_pos < 0) { + pos_cpy.x_hotspot -= x_pos; + x_pos = 0; + } + + if (y_pos < 0) { + pos_cpy.y_hotspot -= y_pos; + y_pos = 0; + } + + pos_cpy.x = (uint32_t)x_pos; + pos_cpy.y = (uint32_t)y_pos; if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) -- cgit From 00853a4f7dd52e4529f681a685073f1533e1ed19 Mon Sep 17 00:00:00 2001 From: Amanda Liu Date: Fri, 15 Nov 2019 17:07:27 -0500 Subject: drm/amd/display: Fix screen tearing on vrr tests [Why] Screen tearing is present in tests when setting the frame rate to certain fps [How] Revert previous optimizations for low frame rates. 
Signed-off-by: Amanda Liu Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../drm/amd/display/modules/freesync/freesync.c | 32 +++++++++------------- .../gpu/drm/amd/display/modules/inc/mod_freesync.h | 1 - 2 files changed, 13 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index fa57885503d4..a94700940fd6 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -37,8 +37,8 @@ #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) /* Number of elements in the render times cache array */ #define RENDER_TIMES_MAX_COUNT 10 -/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */ -#define BTR_MAX_MARGIN 2500 +/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ +#define BTR_EXIT_MARGIN 2000 /* Threshold to change BTR multiplier (to avoid frequent changes) */ #define BTR_DRIFT_MARGIN 2000 /*Threshold to exit fixed refresh rate*/ @@ -254,22 +254,24 @@ static void apply_below_the_range(struct core_freesync *core_freesync, unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; unsigned int frames_to_insert = 0; + unsigned int min_frame_duration_in_ns = 0; + unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; unsigned int delta_from_mid_point_delta_in_us; - unsigned int max_render_time_in_us = - in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us; + + min_frame_duration_in_ns = ((unsigned int) (div64_u64( + (1000000000ULL * 1000000), + in_out_vrr->max_refresh_in_uhz))); /* Program BTR */ - if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) { + if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { /* Exit Below the Range */ if (in_out_vrr->btr.btr_active) { in_out_vrr->btr.frame_counter = 0; in_out_vrr->btr.btr_active = false; } - } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { + } else if (last_render_time_in_us > max_render_time_in_us) { /* Enter Below the Range */ - if (!in_out_vrr->btr.btr_active) { - in_out_vrr->btr.btr_active = true; - } + in_out_vrr->btr.btr_active = true; } /* BTR set to "not active" so disengage */ @@ -325,9 +327,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Choose number of frames to insert based on how close it * can get to the mid point of the variable range. 
*/ - if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && - (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || - mid_point_frames_floor < 2)) { + if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { frames_to_insert = mid_point_frames_ceil; delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - delta_from_mid_point_in_us_1; @@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, if (in_out_vrr->btr.frames_to_insert != 0 && delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < - max_render_time_in_us) && + in_out_vrr->max_duration_in_us) && ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) > in_out_vrr->min_duration_in_us)) frames_to_insert = in_out_vrr->btr.frames_to_insert; @@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; - in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - - 2 * in_out_vrr->min_duration_in_us; - if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) - in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; - in_out_vrr->supported = true; } @@ -816,7 +811,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.inserted_duration_in_us = 0; in_out_vrr->btr.frames_to_insert = 0; in_out_vrr->btr.frame_counter = 0; - in_out_vrr->btr.mid_point_in_us = (in_out_vrr->min_duration_in_us + in_out_vrr->max_duration_in_us) / 2; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index dbe7835aabcf..dc187844d10b 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -92,7 +92,6 @@ struct mod_vrr_params_btr { uint32_t inserted_duration_in_us; uint32_t frames_to_insert; uint32_t frame_counter; - uint32_t margin_in_us; }; struct mod_vrr_params_fixed_refresh { -- cgit From 0beb54039d46702a7bc66ee1f36378785b450421 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 15 Nov 2019 12:04:25 -0500 Subject: drm/amd/display: update dispclk and dppclk vco frequency Value obtained from DV is not allowing 8k60 CTA mode with DSC to pass, after checking real value being used in hw, find out that correct value is 3600, which will allow that mode. 
Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index fef11d57d2b7..8fa63929d3b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -255,7 +255,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 23.84, .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3550, + .dispclk_dppclk_vco_speed_mhz = 3600, .xfc_bus_transport_time_us = 4, .xfc_xbuf_latency_tolerance_us = 4, .use_urgent_burst_bw = 1, -- cgit From e66164105d2f4779387b80d726680ec9875b970f Mon Sep 17 00:00:00 2001 From: Reza Amini Date: Fri, 15 Nov 2019 17:39:12 -0500 Subject: drm/amd/display: Implement DePQ for DCN2 [Why] Need support for more color management in 10bit surface. [How] Provide support for DePQ for 10bit surface Signed-off-by: Reza Amini Reviewed-by: Krunoslav Kovac Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 3 +++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 5 +++++ 2 files changed, 8 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 2d112c316424..05a3e7f97ef0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -149,6 +149,9 @@ void dpp2_set_degamma( case IPP_DEGAMMA_MODE_HW_xvYCC: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; default: BREAK_TO_DEBUGGER(); break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 6a0d280bafba..32878a65bdd7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -878,6 +878,11 @@ bool dcn20_set_input_transfer_func(struct dc *dc, IPP_DEGAMMA_MODE_BYPASS); break; case TRANSFER_FUNCTION_PQ: + dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); + cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); + dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); + result = true; + break; default: result = false; break; -- cgit From 9ec53e11c76670a0ba0fb10fd023fab2b30d7e50 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 18 Nov 2019 08:33:34 -0500 Subject: drm/amd/display: 3.2.62 Signed-off-by: Aric Cyr Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4c7a2882a512..c24639080371 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.61" +#define DC_VER "3.2.62" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From 186a1fb79c6ce6b52eaa168c64d5f5964a3bd645 Mon Sep 17 00:00:00 2001 From: Krunoslav 
Kovac Date: Fri, 15 Nov 2019 10:00:46 -0500 Subject: drm/amd/display: Change HDR_MULT check [Why] Currently we require HDR_MULT >= 1.0 There are scenarios where we need < 1.0 [How] Only guard against 0 - it will black-screen image. It is up to higher-level logic to decide what HDR_MULT values are allowed in each particular case. Signed-off-by: Krunoslav Kovac Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 61d2f1233f8c..3996fef56948 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2390,17 +2390,13 @@ void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx) struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; uint32_t hw_mult = 0x1f000; // 1.0 default multiplier struct custom_float_format fmt; - bool mult_negative; // True if fixed31_32 sign bit indicates negative value - uint32_t mult_int; // int component of fixed31_32 fmt.exponenta_bits = 6; fmt.mantissa_bits = 12; fmt.sign = true; - mult_negative = multiplier.value >> 63 != 0; - mult_int = multiplier.value >> 32; - if (mult_int && !mult_negative) // Check if greater than 1 + if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0 convert_to_custom_float_format(multiplier, &fmt, &hw_mult); pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier( -- cgit From 772c0b59ad94e8d828355aec5494f02f807eb504 Mon Sep 17 00:00:00 2001 From: George Shen Date: Fri, 15 Nov 2019 18:56:57 -0500 Subject: drm/amd/display: Increase the number of retries after AUX DEFER [Why] When a timeout occurs after a DEFER, some devices require more retries than in the case of a regular timeout. [How] In a timeout occurrence, check whether a DEFER has occurred before the timeout and retry MAX_DEFER_RETRIES retries times instead of MAX_TIMEOUT_RETRIES. 
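[The retry decision described above can be summarized with this standalone sketch; the real logic lives in dce_aux_transfer_with_retries() in the diff below, and the retry limits shown here are assumed values for illustration.]

#define AUX_MAX_DEFER_RETRIES   7	/* assumed value for illustration */
#define AUX_MAX_TIMEOUT_RETRIES 3	/* assumed value for illustration */

static bool retry_after_timeout(bool retry_on_defer,
				int *aux_defer_retries,
				int *aux_timeout_retries)
{
	/* A DEFER seen earlier means the sink is slow, not dead: count the
	 * timeout against the larger DEFER retry budget. */
	if (retry_on_defer)
		return ++(*aux_defer_retries) < AUX_MAX_DEFER_RETRIES;

	/* Otherwise use the regular timeout retry budget. */
	return ++(*aux_timeout_retries) < AUX_MAX_TIMEOUT_RETRIES;
}
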
Signed-off-by: George Shen Reviewed-by: Tony Cheng Acked-by: Abdoulaye Berthe Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 32 +++++++++++++++++++--------- 1 file changed, 22 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index f7626cd70ec8..191b68b8163a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -611,6 +611,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, uint8_t reply; bool payload_reply = true; enum aux_channel_operation_result operation_result; + bool retry_on_defer = false; + int aux_ack_retries = 0, aux_defer_retries = 0, aux_i2c_defer_retries = 0, @@ -641,8 +643,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_TRANSACTION_REPLY_AUX_DEFER: - case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: + retry_on_defer = true; + case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; } else { @@ -675,15 +678,24 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) - goto fail; - else { - /* - * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts - * According to the DP spec there should be 3 retries total - * with a 400us wait inbetween each. Hardware already waits - * for 550us therefore no wait is required here. - */ + // Check whether a DEFER had occurred before the timeout. + // If so, treat timeout as a DEFER. + if (retry_on_defer) { + if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + goto fail; + else if (payload->defer_delay > 0) + msleep(payload->defer_delay); + } else { + if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) + goto fail; + else { + /* + * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts + * According to the DP spec there should be 3 retries total + * with a 400us wait inbetween each. Hardware already waits + * for 550us therefore no wait is required here. + */ + } } break; -- cgit From 92855074616f9c44229c1c3ec16421cc65e8e609 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Mon, 18 Nov 2019 13:45:50 -0500 Subject: drm/amd/display: Compare clock state member to determine optimization. [Why] It seems always request passive flip on RN due to incorrect compare clock state to determine optization. [How] Instead of calling memcmp, compare clock state member to determine the condition. 
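[Illustrative caller, not part of the patch: comparing only the members that actually drive clock programming avoids the false "changed" results a raw memcmp of the whole struct can give (padding and bookkeeping fields differ even when no reprogramming is needed). The hook itself is wired up in the diff below; the wrapper name is hypothetical.]

static bool clocks_changed(struct clk_mgr *clk_mgr_base,
			   struct dc_clocks *new_clocks)
{
	struct dc_clocks *cur = &clk_mgr_base->clks;

	if (clk_mgr_base->funcs->are_clock_states_equal)
		return !clk_mgr_base->funcs->are_clock_states_equal(new_clocks,
								    cur);

	/* Fallback: byte-wise compare, which tends to over-report changes. */
	return memcmp(new_clocks, cur, sizeof(*new_clocks)) != 0;
}
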
Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 37230d3d94a0..de51ef12e33a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -471,12 +471,28 @@ static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) } +static bool rn_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + + return true; +} + + static struct clk_mgr_funcs dcn21_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = rn_update_clocks, .init_clocks = rn_init_clocks, .enable_pme_wa = rn_enable_pme_wa, - /* .dump_clk_registers = rn_dump_clk_registers, */ + .are_clock_states_equal = rn_are_clock_states_equal, .notify_wm_ranges = rn_notify_wm_ranges }; -- cgit From 71e6bd2ac207b875c54133ffa023ca8059b0c853 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 28 Oct 2019 15:42:29 -0400 Subject: drm/amd/display: update dml related structs In preparation for further changes Signed-off-by: Dmytro Laktyushkin Reviewed-by: Chris Park Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 3 +++ drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index f7f1f58fa655..cfc69919ef9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2033,6 +2033,7 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable; if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) pipes[pipe_cnt].pipe.src.viewport_height = 1080; + pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height; pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ pipes[pipe_cnt].pipe.src.source_format = dm_444_32; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ @@ -2066,6 +2067,7 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; + pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 
516396d53d01..220d5e610f1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -99,6 +99,7 @@ struct _vcs_dpi_soc_bounding_box_st { unsigned int num_chans; unsigned int vmm_page_size_bytes; unsigned int hostvm_min_page_size_bytes; + unsigned int gpuvm_min_page_size_bytes; double dram_clock_change_latency_us; double dummy_pstate_latency_us; double writeback_dram_clock_change_latency_us; @@ -224,6 +225,7 @@ struct _vcs_dpi_display_pipe_source_params_st { int source_scan; int sw_mode; int macro_tile_size; + unsigned int surface_height_y; unsigned int viewport_width; unsigned int viewport_height; unsigned int viewport_y_y; @@ -400,6 +402,7 @@ struct _vcs_dpi_display_rq_misc_params_st { struct _vcs_dpi_display_rq_params_st { unsigned char yuv420; unsigned char yuv420_10bpc; + unsigned char rgbe_alpha; display_rq_misc_params_st misc; display_rq_sizing_params_st sizing; display_rq_dlg_params_st dlg; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index b1c2b79e42b6..15b72a8b5174 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -231,7 +231,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes; - mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024; + mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024; mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024; // Set the voltage scaling clocks as the defaults. Most of these will // be set to different values by the test -- cgit From c14f2507be0bd81e2fd8a14abb9dc7196af60a78 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Mon, 18 Nov 2019 12:22:06 -0500 Subject: drm/amd/display: correct log message for lttpr [Why] When setting lttpr mode, the new mode to bet is not logged properly. [How] Update log message to show the right mode. 
Signed-off-by: abdoulaye berthe Reviewed-by: George Shen Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index dfcd6421ee01..42aa889fd0f5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1219,7 +1219,7 @@ static void configure_lttpr_mode(struct dc_link *link) uint8_t repeater_id; uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, @@ -1227,7 +1227,7 @@ static void configure_lttpr_mode(struct dc_link *link) if (!link->is_lttpr_mode_transparent) { - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; core_link_write_dpcd(link, -- cgit From ad51b4ac12f785aecf39f1a8b235e0681efde84b Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 18 Nov 2019 13:31:04 -0500 Subject: drm/amd/display: Extend DMCUB offload testing into dcn20/21 [Why] To quickly validate whether DMCUB is running and accepting commands for offload testing we want to intercept a common sequence as part of modeset programming. [How] OTG enable will cause the most impact in terms of golden register changes and it's a single register write. This approach was previously done in dcn10 code when it was shared with dcn20 but it wasn't ported over to the dcn20 code. Port over start, execute and wait sequence into dcn20_optc. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index f5854a5d2b76..673c83e2afd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 1); + REG_SEQ_START(); + /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 3, OTG_MASTER_EN, 1); + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + return true; } -- cgit From 4b4c5638c08d6be3fc4380bda23aa46bbd158073 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Thu, 5 Dec 2019 14:21:31 -0500 Subject: drm/amdgpu: Fix BACO entry failure in NAVI10. BACO feature must be kept enabled to allow entry into BACO state in SMU during runtime suspend. 
Signed-off-by: Andrey Grodzovsky Reviewed-by: Evan Quan Reviewed-by: Alex Deucher Tested-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index c30f9a281e4c..6dddd7818558 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1383,7 +1383,7 @@ static int smu_suspend(void *handle) if (ret) return ret; - if (adev->in_gpu_reset && baco_feature_is_enabled) { + if (baco_feature_is_enabled) { ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true); if (ret) { pr_warn("set BACO feature enabled failed, return %d\n", ret); -- cgit From 6ceeb144b1d6952a36afa6c29718beac575f2a3f Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 28 Nov 2019 14:51:46 +0100 Subject: drm/amdgpu: move VM eviction decision into amdgpu_vm.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a page tables needs to be evicted the VM code should decide if that is possible or not. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 + 3 files changed, 24 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 61d9b7774d42..80fc3d8ee1cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1489,11 +1489,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, struct dma_fence *f; int i; - /* Don't evict VM page tables while they are busy, otherwise we can't - * cleanly handle page faults. - */ if (bo->type == ttm_bo_type_kernel && - !dma_resv_test_signaled_rcu(bo->base.resv, true)) + !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) return false; /* If bo is a KFD BO, check if the bo belongs to the current process. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f20b572d2438..145ea95d977a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2499,6 +2499,28 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, kfree(bo_va); } +/** + * amdgpu_vm_evictable - check if we can evict a VM + * + * @bo: A page table of the VM. + * + * Check if it is possible to evict a VM. 
+ */ +bool amdgpu_vm_evictable(struct amdgpu_bo *bo) +{ + struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; + + /* Page tables of a destroyed VM can go away immediately */ + if (!bo_base || !bo_base->vm) + return true; + + /* Don't evict VM page tables while they are busy */ + if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) + return false; + + return true; +} + /** * amdgpu_vm_bo_invalidate - mark the bo as invalid * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67f946b0c515..d3b7a5cff56c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -378,6 +378,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); +bool amdgpu_vm_evictable(struct amdgpu_bo *bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); -- cgit From e095fc17bbd216ccac7fe06132067ae6e91f01c3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 29 Nov 2019 11:33:54 +0100 Subject: drm/amdgpu: explicitely sync to VM updates v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allows us to reduce the overhead while syncing to fences a bit. v2: also drop adev parameter from the functions Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 8 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 19 ++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 13 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 38 +++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 8 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 2 +- 7 files changed, 51 insertions(+), 40 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index b6d1958d514f..d8db5ecdf9c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -358,7 +358,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) if (ret) return ret; - return amdgpu_sync_fence(NULL, sync, vm->last_update, false); + return amdgpu_sync_fence(sync, vm->last_update, false); } static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) @@ -751,7 +751,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); - amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); + amdgpu_sync_fence(sync, bo_va->last_pt_update, false); return 0; } @@ -770,7 +770,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, return ret; } - return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); + return amdgpu_sync_fence(sync, bo_va->last_pt_update, false); } static int map_bo_to_gpuvm(struct amdgpu_device *adev, @@ -2045,7 +2045,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) pr_debug("Memory eviction: Validate BOs failed. Try again\n"); goto validate_map_fail; } - ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false); + ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false); if (ret) { pr_debug("Memory eviction: Sync BO fence failed. 
Try again\n"); goto validate_map_fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index a169ff16277f..06fb535a504f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -797,29 +797,23 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, - fpriv->prt_va->last_pt_update, false); + r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update); if (r) return r; if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { - struct dma_fence *f; - bo_va = fpriv->csa_va; BUG_ON(!bo_va); r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; - f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, &p->job->sync, f, false); + r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); if (r) return r; } amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct dma_fence *f; - /* ignore duplicates */ bo = ttm_to_amdgpu_bo(e->tv.bo); if (!bo) @@ -833,8 +827,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, &p->job->sync, f, false); + r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); if (r) return r; } @@ -847,7 +840,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false); + r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update); if (r) return r; @@ -989,7 +982,7 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, dma_fence_put(old); } - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + r = amdgpu_sync_fence(&p->job->sync, fence, true); dma_fence_put(fence); if (r) return r; @@ -1011,7 +1004,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, return r; } - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + r = amdgpu_sync_fence(&p->job->sync, fence, true); dma_fence_put(fence); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 6f9289735e31..3a67f6c046d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -206,7 +206,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, int r; if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait)) - return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false); + return amdgpu_sync_fence(sync, ring->vmid_wait, false); fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); if (!fences) @@ -241,7 +241,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, return -ENOMEM; } - r = amdgpu_sync_fence(adev, sync, &array->base, false); + r = amdgpu_sync_fence(sync, &array->base, false); dma_fence_put(ring->vmid_wait); ring->vmid_wait = &array->base; return r; @@ -294,7 +294,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); if (tmp) { *id = NULL; - r = amdgpu_sync_fence(adev, sync, tmp, false); + r = amdgpu_sync_fence(sync, tmp, false); return r; } needs_flush = true; @@ -303,7 +303,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, /* Good we can use this VMID. Remember this submission as * user of the VMID. 
*/ - r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); + r = amdgpu_sync_fence(&(*id)->active, fence, false); if (r) return r; @@ -375,7 +375,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, /* Good, we can use this VMID. Remember this submission as * user of the VMID. */ - r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); + r = amdgpu_sync_fence(&(*id)->active, fence, false); if (r) return r; @@ -435,8 +435,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, id = idle; /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(ring->adev, &id->active, - fence, false); + r = amdgpu_sync_fence(&id->active, fence, false); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4fb20e870e63..73328d0c741d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -193,8 +193,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, fence = amdgpu_sync_get_fence(&job->sync, &explicit); if (fence && explicit) { if (drm_sched_dependency_optimized(fence, s_entity)) { - r = amdgpu_sync_fence(ring->adev, &job->sched_sync, - fence, false); + r = amdgpu_sync_fence(&job->sched_sync, fence, false); if (r) DRM_ERROR("Error adding fence (%d)\n", r); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 95e5e93edd18..f1e5fbef54d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -129,7 +129,8 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep, * Tries to add the fence to an existing hash entry. Returns true when an entry * was found, false otherwise. */ -static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit) +static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, + bool explicit) { struct amdgpu_sync_entry *e; @@ -151,19 +152,18 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, * amdgpu_sync_fence - remember to sync to this fence * * @sync: sync object to add fence to - * @fence: fence to sync to + * @f: fence to sync to + * @explicit: if this is an explicit dependency * + * Add the fence to the sync object. */ -int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f, bool explicit) +int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f, + bool explicit) { struct amdgpu_sync_entry *e; if (!f) return 0; - if (amdgpu_sync_same_dev(adev, f) && - amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM) - amdgpu_sync_keep_later(&sync->last_vm_update, f); if (amdgpu_sync_add_later(sync, f, explicit)) return 0; @@ -179,6 +179,24 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, return 0; } +/** + * amdgpu_sync_vm_fence - remember to sync to this VM fence + * + * @adev: amdgpu device + * @sync: sync object to add fence to + * @fence: the VM fence to add + * + * Add the fence to the sync object and remember it as VM update. 
+ */ +int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence) +{ + if (!fence) + return 0; + + amdgpu_sync_keep_later(&sync->last_vm_update, fence); + return amdgpu_sync_fence(sync, fence, false); +} + /** * amdgpu_sync_resv - sync to a reservation object * @@ -204,7 +222,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, /* always sync to the exclusive fence */ f = dma_resv_get_excl(resv); - r = amdgpu_sync_fence(adev, sync, f, false); + r = amdgpu_sync_fence(sync, f, false); flist = dma_resv_get_list(resv); if (!flist || r) @@ -239,7 +257,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, continue; } - r = amdgpu_sync_fence(adev, sync, f, false); + r = amdgpu_sync_fence(sync, f, false); if (r) break; } @@ -340,7 +358,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone) hash_for_each_safe(source->fences, i, tmp, e, node) { f = e->fence; if (!dma_fence_is_signaled(f)) { - r = amdgpu_sync_fence(NULL, clone, f, e->explicit); + r = amdgpu_sync_fence(clone, f, e->explicit); if (r) return r; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index b5f1778a2319..d62c2b81d92b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -40,8 +40,9 @@ struct amdgpu_sync { }; void amdgpu_sync_create(struct amdgpu_sync *sync); -int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f, bool explicit); +int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f, + bool explicit); +int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, @@ -49,7 +50,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, bool explicit_sync); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); -struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit); +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, + bool *explicit); int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone); int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); void amdgpu_sync_free(struct amdgpu_sync *sync); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 832db59f441e..50f487666977 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -71,7 +71,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, p->num_dw_left = ndw; /* Wait for moves to be completed */ - r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false); + r = amdgpu_sync_fence(&p->job->sync, exclusive, false); if (r) return r; -- cgit From 90b69cdc5f159be09d799257661e119a26ea4233 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 29 Nov 2019 12:44:07 +0100 Subject: drm/amdgpu: stop adding VM updates fences to the resv obj MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't add the VM update fences to the resv object and remove the handling that was needed to stop implicitly syncing to them. Ongoing updates prevent page tables from being evicted and we manually block for all updates to complete before releasing PDs and PTs. This way we can do updates even without the resv obj locked.
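A condensed sketch of the replacement pattern (illustrative only, mirroring the hunks below rather than adding anything to the patch; new_fence and direct stand in for the job fence and the update path flag): instead of attaching every page-table update fence to the root BO's reservation object, the VM remembers only the most recent fence per scheduler entity and waits on those explicitly at teardown.

        /* Publish the newest update fence for this entity. */
        struct dma_fence *tmp = dma_fence_get(new_fence);

        if (direct)
                swap(vm->last_direct, tmp);
        else
                swap(vm->last_delayed, tmp);
        dma_fence_put(tmp);     /* drop the reference to the previous fence */

        /* On VM teardown, block until all published updates completed. */
        dma_fence_wait(vm->last_direct, false);
        dma_fence_wait(vm->last_delayed, false);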
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 10 ++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 30 +++++++++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 ++++++++--- 4 files changed, 42 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index f1e5fbef54d8..a09b6b9c27d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -240,13 +240,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, continue; if (amdgpu_sync_same_dev(adev, f)) { - /* VM updates are only interesting - * for other VM updates and moves. + /* VM updates only sync with moves but not with user + * command submissions or KFD evictions fences */ - if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) && - (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) && - ((owner == AMDGPU_FENCE_OWNER_VM) != - (fence_owner == AMDGPU_FENCE_OWNER_VM))) + if (owner == AMDGPU_FENCE_OWNER_VM && + fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) continue; /* Ignore fence from the same owner and explicit one as diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 145ea95d977a..81cbb8105a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -562,8 +562,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, { entry->priority = 0; entry->tv.bo = &vm->root.base.bo->tbo; - /* One for the VM updates, one for TTM and one for the CS job */ - entry->tv.num_shared = 3; + /* One for TTM and one for the CS job */ + entry->tv.num_shared = 2; entry->user_pages = NULL; list_add(&entry->tv.head, validated); } @@ -2518,6 +2518,11 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) return false; + /* Don't evict VM page tables while they are updated */ + if (!dma_fence_is_signaled(bo_base->vm->last_direct) || + !dma_fence_is_signaled(bo_base->vm->last_delayed)) + return false; + return true; } @@ -2683,8 +2688,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, - true, true, timeout); + timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, + true, true, timeout); + if (timeout <= 0) + return timeout; + + timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout); + if (timeout <= 0) + return timeout; + + return dma_fence_wait_timeout(vm->last_delayed, true, timeout); } /** @@ -2753,6 +2766,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, else vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->last_update = NULL; + vm->last_direct = dma_fence_get_stub(); + vm->last_delayed = dma_fence_get_stub(); amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) @@ -2803,6 +2818,8 @@ error_free_root: vm->root.base.bo = NULL; error_free_delayed: + dma_fence_put(vm->last_direct); + dma_fence_put(vm->last_delayed); drm_sched_entity_destroy(&vm->delayed); error_free_direct: @@ -3003,6 +3020,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->pasid = 0; } + dma_fence_wait(vm->last_direct, false); + dma_fence_put(vm->last_direct); + 
dma_fence_wait(vm->last_delayed, false); + dma_fence_put(vm->last_delayed); + list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { amdgpu_vm_prt_fini(adev, vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index d3b7a5cff56c..6c2124b10820 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -266,6 +266,10 @@ struct amdgpu_vm { struct drm_sched_entity direct; struct drm_sched_entity delayed; + /* Last submission to the scheduler entities */ + struct dma_fence *last_direct; + struct dma_fence *last_delayed; + unsigned int pasid; /* dedicated to vm */ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 50f487666977..19b7f80758f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -95,11 +95,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, struct dma_fence **fence) { - struct amdgpu_bo *root = p->vm->root.base.bo; struct amdgpu_ib *ib = p->job->ibs; struct drm_sched_entity *entity; + struct dma_fence *f, *tmp; struct amdgpu_ring *ring; - struct dma_fence *f; int r; entity = p->direct ? &p->vm->direct : &p->vm->delayed; @@ -112,7 +111,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, if (r) goto error; - amdgpu_bo_fence(root, f, true); + tmp = dma_fence_get(f); + if (p->direct) + swap(p->vm->last_direct, tmp); + else + swap(p->vm->last_delayed, tmp); + dma_fence_put(tmp); + if (fence && !p->direct) swap(*fence, f); dma_fence_put(f); -- cgit From b4ff0f8a85f3c523942e57b716e8722e7f6799cc Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 4 Dec 2019 13:29:49 +0100 Subject: drm/amdgpu: add VM eviction lock v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows to invalidate VM entries without taking the reservation lock. 
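A minimal sketch of the trylock handshake the hunks below introduce (error handling trimmed; eviction_lock and evicting are the fields added by this patch): the eviction check never sleeps on an in-flight update, and the update path bails out with -EBUSY once eviction has been flagged.

        /* Eviction side (amdgpu_vm_evictable): never block on an update. */
        if (!mutex_trylock(&vm->eviction_lock))
                return false;           /* update in flight, keep the page table */
        vm->evicting = true;
        mutex_unlock(&vm->eviction_lock);

        /* Update side: refuse to touch page tables once eviction started. */
        mutex_lock(&vm->eviction_lock);
        if (vm->evicting) {
                r = -EBUSY;
                goto error_unlock;
        }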
v3: use -EBUSY Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 39 ++++++++++++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 ++++ 2 files changed, 36 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 81cbb8105a68..8f26504a59a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -656,7 +656,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { struct amdgpu_vm_bo_base *bo_base, *tmp; - int r = 0; + int r; vm->bulk_moveable &= list_empty(&vm->evicted); @@ -665,7 +665,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = validate(param, bo); if (r) - break; + return r; if (bo->tbo.type != ttm_bo_type_kernel) { amdgpu_vm_bo_moved(bo_base); @@ -678,7 +678,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } } - return r; + mutex_lock(&vm->eviction_lock); + vm->evicting = false; + mutex_unlock(&vm->eviction_lock); + + return 0; } /** @@ -1555,15 +1559,25 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (!(flags & AMDGPU_PTE_VALID)) owner = AMDGPU_FENCE_OWNER_KFD; + mutex_lock(&vm->eviction_lock); + if (vm->evicting) { + r = -EBUSY; + goto error_unlock; + } + r = vm->update_funcs->prepare(¶ms, owner, exclusive); if (r) - return r; + goto error_unlock; r = amdgpu_vm_update_ptes(¶ms, start, last + 1, addr, flags); if (r) - return r; + goto error_unlock; - return vm->update_funcs->commit(¶ms, fence); + r = vm->update_funcs->commit(¶ms, fence); + +error_unlock: + mutex_unlock(&vm->eviction_lock); + return r; } /** @@ -2518,11 +2532,19 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) return false; + /* Try to block ongoing updates */ + if (!mutex_trylock(&bo_base->vm->eviction_lock)) + return false; + /* Don't evict VM page tables while they are updated */ if (!dma_fence_is_signaled(bo_base->vm->last_direct) || - !dma_fence_is_signaled(bo_base->vm->last_delayed)) + !dma_fence_is_signaled(bo_base->vm->last_delayed)) { + mutex_unlock(&bo_base->vm->eviction_lock); return false; + } + bo_base->vm->evicting = true; + mutex_unlock(&bo_base->vm->eviction_lock); return true; } @@ -2769,6 +2791,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->last_direct = dma_fence_get_stub(); vm->last_delayed = dma_fence_get_stub(); + mutex_init(&vm->eviction_lock); + vm->evicting = false; + amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 6c2124b10820..7e0eb36da27d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -239,6 +239,10 @@ struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; + /* Lock to prevent eviction while we are updating page tables */ + struct mutex eviction_lock; + bool evicting; + /* BOs who needs a validation */ struct list_head evicted; -- cgit From d515959125f24767d02e82587a11e444eeba0e7b Mon Sep 17 00:00:00 2001 From: Thong Thai Date: Fri, 6 Dec 2019 09:19:20 -0500 Subject: Revert "drm/amdgpu: enable VCN DPG on Raven and Raven2" This 
reverts commit a4840d91c984f93b2acdcd44441d624bbc1af0d2. Reverting due to power efficiency issues seen on Raven 1 and 2 when DPG mode is enabled. Signed-off-by: Thong Thai Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 48649f55f3ba..863c88be055f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1181,9 +1181,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_VCN | - AMD_PG_SUPPORT_VCN_DPG; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } else if (adev->pdev->device == 0x15d8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1226,9 +1224,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_VCN | - AMD_PG_SUPPORT_VCN_DPG; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } break; case CHIP_ARCTURUS: -- cgit From 0d6f39bb77109b1837dc863e08c739039b753ce4 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 6 Dec 2019 18:09:19 +0800 Subject: drm/amdgpu: fix resume failures due to psp fw loading sequence change (v3) This fixes the regression caused by the recent asd/ta loading sequence adjustment. asd/ta loading was moved out of hw_start and should also be applied to psp_resume, otherwise that fw loading will be ignored in the resume phase. v2: add the mutex unlock for asd loading failure case v3: merge the error handling to failed tag Signed-off-by: Hawking Zhang Reviewed-by: Guchun Chen Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c84529718bcb..c14f2ccd0677 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1721,6 +1721,39 @@ static int psp_resume(void *handle) if (ret) goto failed; + ret = psp_asd_load(psp); + if (ret) { + DRM_ERROR("PSP load asd failed!\n"); + goto failed; + } + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + ret = psp_xgmi_initialize(psp); + /* Warning the XGMI seesion initialize failure + * Instead of stop driver initialization + */ + if (ret) + dev_err(psp->adev->dev, + "XGMI: Failed to initialize XGMI session\n"); + } + + if (psp->adev->psp.ta_fw) { + ret = psp_ras_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "HDCP: Failed to initialize HDCP\n"); + + ret = psp_dtm_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "DTM: Failed to initialize DTM\n"); + } + mutex_unlock(&adev->firmware.mutex); return 0; -- cgit From f794913ce82637572fb92633837cfac4f1345ad0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 5 Dec 2019 16:38:01 -0500 Subject: drm/amdgpu/display: add fallthrough comment To avoid a compiler warning.
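For context, a generic illustration of the annotation (the case labels here are made up and are not the dce_aux.c code): with -Wimplicit-fallthrough enabled, the compiler warns whenever a case drops into the next one unless the fallthrough is marked, for example with a comment it recognizes.

        switch (reply) {
        case REPLY_DEFER:
                retry_on_defer = true;
                /* fall through */      /* tells the compiler this is intentional */
        case REPLY_NACK:
                if (++retries >= MAX_RETRIES)
                        goto fail;
                break;
        }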
Reviewed-by: Zhan Liu Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 191b68b8163a..f1a5d2c6aa37 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -645,6 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, case AUX_TRANSACTION_REPLY_AUX_DEFER: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: retry_on_defer = true; + /* fall through */ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; -- cgit From ebcef76b6c2209ef4e86e8640b8a5ac87721b584 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 26 Nov 2019 15:05:07 +0800 Subject: drm/amd/powerplay: drop unnecessary warning prompt As the check may be done with purpose and the warning output will be confusing. Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 199c96caa882..17eeb546c550 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -280,10 +280,8 @@ static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER return -EINVAL; mapping = arcturus_workload_map[profile]; - if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU power source: %d\n", profile); + if (!(mapping.valid_mapping)) return -EINVAL; - } return mapping.map_to; } -- cgit From fcb1fe9c9e0031ff02aaa7532822807d27968044 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 4 Dec 2019 17:29:52 +0800 Subject: drm/amd/powerplay: pre-check the SMU state before issuing message Abort the message issuing if the SMU was not in the right state. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 16 ++++++++-------- drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index eae3657cf1f3..f5469ad43929 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -80,15 +80,13 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) for (i = 0; i < timeout; i++) { cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0) - break; + return cur_value == 0x1 ? 0 : -EIO; + udelay(1); } /* timeout means wrong logic */ - if (i == timeout) - return -ETIME; - - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 
0 : -EIO; + return -ETIME; } int @@ -104,9 +102,11 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, return index; ret = smu_v11_0_wait_for_response(smu); - if (ret) - pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n", - smu_get_message_name(smu, msg), index, param, ret); + if (ret) { + pr_err("Msg issuing pre-check failed and " + "SMU may be not in the right state!\n"); + return ret; + } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 269a7d73b58d..951aa4570a04 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -66,15 +66,13 @@ int smu_v12_0_wait_for_response(struct smu_context *smu) for (i = 0; i < adev->usec_timeout; i++) { cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0) - break; + return cur_value == 0x1 ? 0 : -EIO; + udelay(1); } /* timeout means wrong logic */ - if (i == adev->usec_timeout) - return -ETIME; - - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO; + return -ETIME; } int @@ -90,9 +88,11 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, return index; ret = smu_v12_0_wait_for_response(smu); - if (ret) - pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n", - index, ret, param); + if (ret) { + pr_err("Msg issuing pre-check failed and " + "SMU may be not in the right state!\n"); + return ret; + } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); -- cgit From feffbaac36d333096ad12ee50a7235481123161f Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 26 Nov 2019 17:56:58 +0800 Subject: drm/amdgpu: add condition to enable baco for ras recovery Switch to baco reset method for ras recovery if the PMFW supported. If not, keep the original reset method. v2: revise the condition Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 863c88be055f..09dca499a880 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -557,7 +557,8 @@ static int soc15_mode2_reset(struct amdgpu_device *adev) static enum amd_reset_method soc15_asic_reset_method(struct amdgpu_device *adev) { - bool baco_reset; + bool baco_reset = false; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); switch (adev->asic_type) { case CHIP_RAVEN: @@ -571,18 +572,15 @@ soc15_asic_reset_method(struct amdgpu_device *adev) case CHIP_VEGA20: if (adev->psp.sos_fw_version >= 0x80067) soc15_asic_get_baco_capability(adev, &baco_reset); - else - baco_reset = false; - if (baco_reset) { - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (hive || (ras && ras->supported)) - baco_reset = false; - } + /* + * 1. PMFW version > 0x284300: all cases use baco + * 2. 
PMFW version <= 0x284300: only sGPU w/o RAS use baco + */ + if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400) + baco_reset = false; break; default: - baco_reset = false; break; } -- cgit From 418899d6157516fb0b7acad5e37653e60b285852 Mon Sep 17 00:00:00 2001 From: changzhu Date: Tue, 10 Dec 2019 10:23:09 +0800 Subject: drm/amdgpu: avoid using invalidate semaphore for picasso It may cause timeout waiting for sem acquire in VM flush when using invalidate semaphore for picasso. So it needs to avoid using invalidate semaphore for piasso. Signed-off-by: changzhu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 365a88ecdec4..a208b2883c03 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -468,8 +468,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, */ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) { + if ((vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) && + (!(adev->asic_type == CHIP_RAVEN && + adev->rev_id < 0x8 && + adev->pdev->device == 0x15d8))) { for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acuqire */ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); @@ -499,8 +502,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) + if ((vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) && + (!(adev->asic_type == CHIP_RAVEN && + adev->rev_id < 0x8 && + adev->pdev->device == 0x15d8))) /* * add semaphore release after invalidation, * write with 0 means semaphore release @@ -531,8 +537,11 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, */ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || - ring->funcs->vmhub == AMDGPU_MMHUB_1) + if ((ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) && + (!(adev->asic_type == CHIP_RAVEN && + adev->rev_id < 0x8 && + adev->pdev->device == 0x15d8))) /* a read return value of 1 means semaphore acuqire */ amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_sem + eng, 0x1, 0x1); @@ -548,8 +557,11 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, req, 1 << vmid); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || - ring->funcs->vmhub == AMDGPU_MMHUB_1) + if ((ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) && + (!(adev->asic_type == CHIP_RAVEN && + adev->rev_id < 0x8 && + adev->pdev->device == 0x15d8))) /* * add semaphore release after invalidation, * write with 0 means semaphore release -- cgit From ae46533b17639460b812d4dffa27f453e961ae82 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 6 Dec 2019 11:30:45 +0800 Subject: drm/amd/powerplay: clear VBIOS scratchs on baco exit V2 This is needed for coming asic init on performing gpu reset. 
V2: use non-asic specific programing way Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index f5469ad43929..7781d245f8ef 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1676,10 +1676,17 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) } } else { ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco); + if (ret) + goto out; + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, BIF_DOORBELL_INT_CNTL, DOORBELL_INTERRUPT_DISABLE, 0); WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + /* clear vbios scratch 6 and 7 for coming asic reinit */ + WREG32(adev->bios_scratch_reg_offset + 6, 0); + WREG32(adev->bios_scratch_reg_offset + 7, 0); } if (ret) goto out; -- cgit From 8fa6a7b0b3fdc6ec5454c7976f2ea1c18cd265bc Mon Sep 17 00:00:00 2001 From: Xiaomeng Hou Date: Wed, 4 Dec 2019 13:35:18 +0800 Subject: drm/amd/powerplay: implement interface to retrieve gpu temperature for renoir add sensor interface of get gpu temperature for debugfs. Signed-off-by: Xiaomeng Hou Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index b076589c79ef..9befa50eafae 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -412,6 +412,24 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { return ret; } +static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (!value) + return -EINVAL; + + ret = renoir_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + *value = (metrics.GfxTemperature / 100) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return 0; +} + static int renoir_get_current_activity_percent(struct smu_context *smu, enum amd_pp_sensors sensor, uint32_t *value) @@ -766,6 +784,10 @@ static int renoir_read_sensor(struct smu_context *smu, ret = renoir_get_current_activity_percent(smu, sensor, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_TEMP: + ret = renoir_get_gpu_temperature(smu, (uint32_t *)data); + *size = 4; + break; default: ret = smu_v12_0_read_sensor(smu, sensor, data, size); } -- cgit From 0b97bd6cde1d4bec07e6f5b72cba32fae2739c3b Mon Sep 17 00:00:00 2001 From: Xiaomeng Hou Date: Wed, 4 Dec 2019 15:17:38 +0800 Subject: drm/amd/powerplay: implement interface to retrieve clock freq for renoir implement smu12 get_clk_freq interface to get clock frequency like MCLK/SCLK. 
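An illustrative caller of the new callback (not part of the patch; the 10 kHz unit noted below is an assumption inferred from the *100 scaling in smu_v12_0_get_current_clk_freq(), since the patch itself does not state the unit):

        uint32_t sclk = 0;
        int r = smu_v12_0_get_current_clk_freq(smu, SMU_SCLK, &sclk);

        if (!r)
                pr_info("current SCLK: %u (assumed 10 kHz units)\n", sclk);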
Signed-off-by: Xiaomeng Hou Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 4 +++ drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 49 +++++++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 20 +++++++++++ 3 files changed, 73 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index 922973b7e29f..ad68a5623033 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -75,6 +75,10 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu); int smu_v12_0_populate_smc_tables(struct smu_context *smu); +int smu_v12_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value); + int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 9befa50eafae..2e1941b8034e 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -31,6 +31,9 @@ #include "renoir_ppt.h" +#define CLK_MAP(clk, index) \ + [SMU_##clk] = {1, (index)} + #define MSG_MAP(msg, index) \ [SMU_MSG_##msg] = {1, (index)} @@ -104,6 +107,14 @@ static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq), }; +static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, CLOCK_GFXCLK), + CLK_MAP(SCLK, CLOCK_GFXCLK), + CLK_MAP(SOCCLK, CLOCK_SOCCLK), + CLK_MAP(UCLK, CLOCK_UMCCLK), + CLK_MAP(MCLK, CLOCK_UMCCLK), +}; + static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = { TAB_MAP_VALID(WATERMARKS), TAB_MAP_INVALID(CUSTOM_DPM), @@ -125,6 +136,21 @@ static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index) return mapping.map_to; } +static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index) +{ + struct smu_12_0_cmn2aisc_mapping mapping; + + if (index >= SMU_CLK_COUNT) + return -EINVAL; + + mapping = renoir_clk_map[index]; + if (!(mapping.valid_mapping)) { + return -EINVAL; + } + + return mapping.map_to; +} + static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index) { struct smu_12_0_cmn2aisc_mapping mapping; @@ -352,6 +378,26 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) return ret; } +static int renoir_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + int ret = 0, clk_id = 0; + SmuMetrics_t metrics; + + ret = renoir_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return clk_id; + + *value = metrics.ClockFrequency[clk_id]; + + return ret; +} + static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) { int ret = 0, i = 0; @@ -798,6 +844,7 @@ static int renoir_read_sensor(struct smu_context *smu, static const struct pptable_funcs renoir_ppt_funcs = { .get_smu_msg_index = renoir_get_smu_msg_index, + .get_smu_clk_index = renoir_get_smu_clk_index, .get_smu_table_index = renoir_get_smu_table_index, .tables_init = renoir_tables_init, .set_power_state = NULL, @@ -806,6 +853,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .get_current_power_state = renoir_get_current_power_state, .dpm_set_uvd_enable = 
renoir_dpm_set_uvd_enable, .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable, + .get_current_clk_freq_by_table = renoir_get_current_clk_freq_by_table, .force_dpm_limit_value = renoir_force_dpm_limit_value, .unforce_dpm_levels = renoir_unforce_dpm_levels, .get_workload_type = renoir_get_workload_type, @@ -829,6 +877,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .init_smc_tables = smu_v12_0_init_smc_tables, .fini_smc_tables = smu_v12_0_fini_smc_tables, .populate_smc_tables = smu_v12_0_populate_smc_tables, + .get_current_clk_freq = smu_v12_0_get_current_clk_freq, .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, .mode2_reset = smu_v12_0_mode2_reset, .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 951aa4570a04..0e10cec5e9c3 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -330,6 +330,26 @@ int smu_v12_0_populate_smc_tables(struct smu_context *smu) return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } +int smu_v12_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value) +{ + int ret = 0; + uint32_t freq = 0; + + if (clk_id >= SMU_CLK_COUNT || !value) + return -EINVAL; + + ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq); + if (ret) + return ret; + + freq *= 100; + *value = freq; + + return ret; +} + int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { -- cgit From e304adc06ebfa0ea399b181d795f2efc51dcda12 Mon Sep 17 00:00:00 2001 From: Xiaomeng Hou Date: Wed, 4 Dec 2019 16:16:30 +0800 Subject: drm/amd/powerplay: implement the get_enabled_mask callback for smu12 implement sensor interface of feature mask for debugfs. 
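A short usage sketch (illustrative, not from the patch): the callback fills a two-entry array with the low and high 32-bit halves of the enabled-feature mask, which a debugfs-style consumer can fold into one 64-bit value.

        uint32_t mask[2] = { 0 };
        uint64_t features;

        if (!smu_v12_0_get_enabled_mask(smu, mask, 2)) {
                /* mask[0] holds the low dword, mask[1] the high dword */
                features = ((uint64_t)mask[1] << 32) | mask[0];
                pr_info("enabled SMU features: 0x%016llx\n", features);
        }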
Signed-off-by: Xiaomeng Hou Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 3 +++ drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 1 + drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 29 +++++++++++++++++++++++++++ 3 files changed, 33 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index ad68a5623033..3f1cd06e273c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -75,6 +75,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu); int smu_v12_0_populate_smc_tables(struct smu_context *smu); +int smu_v12_0_get_enabled_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num); + int smu_v12_0_get_current_clk_freq(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value); diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 2e1941b8034e..4726166b76cd 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -877,6 +877,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .init_smc_tables = smu_v12_0_init_smc_tables, .fini_smc_tables = smu_v12_0_fini_smc_tables, .populate_smc_tables = smu_v12_0_populate_smc_tables, + .get_enabled_mask = smu_v12_0_get_enabled_mask, .get_current_clk_freq = smu_v12_0_get_current_clk_freq, .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, .mode2_reset = smu_v12_0_mode2_reset, diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 0e10cec5e9c3..2ac7f2f231b6 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -330,6 +330,35 @@ int smu_v12_0_populate_smc_tables(struct smu_context *smu) return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } +int smu_v12_0_get_enabled_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + uint32_t feature_mask_high = 0, feature_mask_low = 0; + int ret = 0; + + if (!feature_mask || num < 2) + return -EINVAL; + + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); + if (ret) + return ret; + ret = smu_read_smc_arg(smu, &feature_mask_high); + if (ret) + return ret; + + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); + if (ret) + return ret; + ret = smu_read_smc_arg(smu, &feature_mask_low); + if (ret) + return ret; + + feature_mask[0] = feature_mask_low; + feature_mask[1] = feature_mask_high; + + return ret; +} + int smu_v12_0_get_current_clk_freq(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value) -- cgit From a0250689cbb5a8e32f816b3c4778920ce7c44d91 Mon Sep 17 00:00:00 2001 From: Xiaomeng Hou Date: Wed, 4 Dec 2019 17:01:21 +0800 Subject: drm/amd/powerplay: correct the value retrieved through GPU_LOAD sensor interface the unit of variable AverageGfxActivity defined in smu12 metrics struct is centi, so the retrieved value should be divided by 100 before return. 
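A worked example of the unit fix (values are illustrative): with AverageGfxActivity reported in centi-percent, a raw reading of 4250 should surface through the GPU_LOAD sensor as 42, hence the division.

        /* Illustrative values only. */
        metrics.AverageGfxActivity = 4250;              /* 42.50 % as reported by the SMU */
        *value = metrics.AverageGfxActivity / 100;      /* sensor now reports 42 */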
Signed-off-by: Xiaomeng Hou Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 4726166b76cd..89a54f8e08d3 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -492,7 +492,7 @@ static int renoir_get_current_activity_percent(struct smu_context *smu, switch (sensor) { case AMDGPU_PP_SENSOR_GPU_LOAD: - *value = metrics.AverageGfxActivity; + *value = metrics.AverageGfxActivity / 100; break; default: pr_err("Invalid sensor for retrieving clock activity\n"); -- cgit From 4cf781c24c3bc8cc50f8013143aa20b26e9217e8 Mon Sep 17 00:00:00 2001 From: John Clements Date: Wed, 11 Dec 2019 10:18:55 +0800 Subject: drm/amdgpu: Added RAS UMC error query support for Arcturus Updated UMC 6.1 function set to support UMC 6.1.1 and 6.1.2 devices Reviewed-by: Alex Deucher Signed-off-by: John Clements Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 9 +++- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 78 ++++++++++++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 3 +- 3 files changed, 74 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index a208b2883c03..53dfc82ca171 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -708,11 +708,18 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.funcs = &umc_v6_0_funcs; break; case CHIP_VEGA20: + adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; + adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; + adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; + adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; + adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; + adev->umc.funcs = &umc_v6_1_funcs; + break; case CHIP_ARCTURUS: adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; - adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET; + adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; adev->umc.funcs = &umc_v6_1_funcs; break; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 47c4b96b14d1..515eb50cd0f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -31,6 +31,14 @@ #define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10 +/* UMC 6_1_2 register offsets */ +#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360 +#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1 +#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361 +#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1 + /* * (addr / 256) * 8192, the higher 26 bits in ErrorAddr * is the index of 8KB block @@ -95,12 +103,25 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, uint64_t mc_umc_status; uint32_t mc_umc_status_addr; - ecc_err_cnt_sel_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); - ecc_err_cnt_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, 
mmMCA_UMC_UMC0_MCUMC_STATUST0); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* select the lower chip and check the error count */ ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); @@ -141,8 +162,17 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev uint64_t mc_umc_status; uint32_t mc_umc_status_addr; - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* check the MCUMC_STATUS */ mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); @@ -179,8 +209,17 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, uint64_t mc_umc_status, err_addr, retired_page; struct eeprom_table_record *err_rec; - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* skip error address process if -ENOMEM */ if (!err_data->err_addr) { @@ -241,10 +280,21 @@ static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; uint32_t ecc_err_cnt_addr; - ecc_err_cnt_sel_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); - ecc_err_cnt_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); + } else { + /* UMC 6_1_1 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + } /* select the lower chip and check the error count */ ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h index dab9cbd292c5..0ce1d323cfdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h @@ -35,7 +35,8 @@ /* total channel instances in one umc block */ #define UMC_V6_1_TOTAL_CHANNEL_NUM (UMC_V6_1_CHANNEL_INSTANCE_NUM * UMC_V6_1_UMC_INSTANCE_NUM) /* UMC regiser per channel offset */ -#define UMC_V6_1_PER_CHANNEL_OFFSET 0x800 +#define UMC_V6_1_PER_CHANNEL_OFFSET_VG20 0x800 +#define UMC_V6_1_PER_CHANNEL_OFFSET_ARCT 0x400 /* EccErrCnt max value */ #define UMC_V6_1_CE_CNT_MAX 0xffff -- cgit From c9ffa427db34e6896523f0ef0c172a0bbb77c9ad Mon Sep 17 00:00:00 2001 From: Yintian Tao 
Date: Wed, 30 Oct 2019 17:16:35 +0800 Subject: drm/amd/powerplay: enable pp one vf mode for vega10 Originally, due to the restriction from PSP and SMU, VF has to send message to hypervisor driver to handle powerplay change which is complicated and redundant. Currently, SMU and PSP can support VF to directly handle powerplay change by itself. Therefore, the old code about the handshake between VF and PF to handle powerplay will be removed and VF will use new the registers below to handshake with SMU. mmMP1_SMN_C2PMSG_101: register to handle SMU message mmMP1_SMN_C2PMSG_102: register to handle SMU parameter mmMP1_SMN_C2PMSG_103: register to handle SMU response v2: remove module parameter pp_one_vf v3: fix the parens v4: forbid vf to change smu feature v5: use hwmon_attributes_visible to skip sepicified hwmon atrribute v6: change skip condition at vega10_copy_table_to_smc Signed-off-by: Yintian Tao Acked-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 - drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 182 ++++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 51 ------ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 14 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 78 --------- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4 - drivers/gpu/drm/amd/amdgpu/soc15.c | 8 +- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 4 +- .../gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 15 +- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 16 ++ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 30 ++-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 156 ++++++++++++------ .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 3 + drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c | 56 +++++-- .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 14 ++ 17 files changed, 352 insertions(+), 300 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5a8506182ade..95988ff7356d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1877,6 +1877,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) } } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); @@ -1918,11 +1921,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_amdkfd_device_init(adev); init_failed: - if (amdgpu_sriov_vf(adev)) { - if (!r) - amdgpu_virt_init_data_exchange(adev); + if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, true); - } return r; } @@ -2822,7 +2822,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); mutex_init(&adev->lock_reset); - mutex_init(&adev->virt.dpm_mutex); mutex_init(&adev->psp.mutex); r = amdgpu_device_check_arguments(adev); @@ -3041,9 +3040,6 @@ fence_driver_init: amdgpu_fbdev_init(adev); - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) - amdgpu_pm_virt_sysfs_init(adev); - r = amdgpu_pm_sysfs_init(adev); if (r) { adev->pm_sysfs_en = false; @@ -3188,8 +3184,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) iounmap(adev->rmmio); adev->rmmio = NULL; amdgpu_device_doorbell_fini(adev); - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) - amdgpu_pm_virt_sysfs_fini(adev); 
amdgpu_debugfs_regs_cleanup(adev); device_remove_file(adev->dev, &dev_attr_pcie_replay_count); @@ -3670,6 +3664,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) goto error; + amdgpu_virt_init_data_exchange(adev); /* we need recover gart prior to run SMC/CP/SDMA resume */ amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); @@ -3687,7 +3682,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_amdkfd_post_reset(adev); error: - amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { amdgpu_inc_vram_lost(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index f7b9296fb1c4..60591dbc2097 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -703,10 +703,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (adev->pm.dpm_enabled) { dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; - } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) { - dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; - dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; } else { dev_info.max_engine_clock = adev->clock.default_sclk * 10; dev_info.max_memory_clock = adev->clock.default_mclk * 10; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 0c7324bc31a7..b32adda70bbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -159,6 +159,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { if (adev->smu.ppt_funcs->get_current_power_state) pm = smu_get_current_power_state(&adev->smu); @@ -184,6 +187,9 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type state; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (strncmp("battery", buf, strlen("battery")) == 0) state = POWER_STATE_TYPE_BATTERY; else if (strncmp("balanced", buf, strlen("balanced")) == 0) @@ -283,7 +289,7 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_dpm_forced_level level = 0xff; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; if ((adev->flags & AMD_IS_PX) && @@ -320,6 +326,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, enum amd_dpm_forced_level current_level = 0xff; int ret = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + /* Can't force performance level when the card is off */ if ((adev->flags & AMD_IS_PX) && (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) @@ -348,19 +357,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, goto fail; } - /* handle sriov case here */ - if (amdgpu_sriov_vf(adev)) { - if (amdgim_is_hwperf(adev) && - adev->virt.ops->force_dpm_level) { - mutex_lock(&adev->pm.mutex); - adev->virt.ops->force_dpm_level(adev, level); - mutex_unlock(&adev->pm.mutex); - return count; - 
} else { - return -EINVAL; - } - } - if (is_support_sw_smu(adev)) current_level = smu_get_performance_level(&adev->smu); else if (adev->powerplay.pp_funcs->get_performance_level) @@ -440,6 +436,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, enum amd_pm_state_type pm = 0; int i = 0, ret = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { pm = smu_get_current_power_state(smu); ret = smu_get_power_num_states(smu, &data); @@ -469,6 +468,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (adev->pp_force_state_enabled) return amdgpu_get_pp_cur_state(dev, attr, buf); else @@ -486,6 +488,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, unsigned long idx; int ret; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (strlen(buf) == 1) adev->pp_force_state_enabled = false; else if (is_support_sw_smu(adev)) @@ -535,6 +540,9 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, char *table = NULL; int size; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { size = smu_sys_get_pp_table(&adev->smu, (void **)&table); if (size < 0) @@ -562,6 +570,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (is_support_sw_smu(adev)) { ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); if (ret) @@ -654,6 +665,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, const char delimiter[3] = {' ', '\n', '\0'}; uint32_t type; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + if (count > 127) return -EINVAL; @@ -726,6 +740,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint32_t size = 0; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); @@ -770,6 +787,9 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev, uint64_t featuremask; int ret; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + ret = kstrtou64(buf, 0, &featuremask); if (ret) return -EINVAL; @@ -796,6 +816,9 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { return smu_sys_get_pp_feature_mask(&adev->smu, buf); } else if (adev->powerplay.pp_funcs->get_ppfeature_status) @@ -841,9 +864,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) - return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); @@ -899,8 +921,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, int ret; uint32_t mask = 0; - if (amdgpu_sriov_vf(adev)) - return 0; + if 
(amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; ret = amdgpu_read_mask(buf, count, &mask); if (ret) @@ -924,9 +946,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) - return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); @@ -946,8 +967,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, int ret; uint32_t mask = 0; - if (amdgpu_sriov_vf(adev)) - return 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; ret = amdgpu_read_mask(buf, count, &mask); if (ret) @@ -971,6 +992,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -989,6 +1013,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; @@ -1011,6 +1038,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -1029,6 +1059,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; @@ -1051,6 +1084,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -1069,6 +1105,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; @@ -1091,6 +1130,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -1109,6 +1151,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; @@ -1132,6 +1177,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, struct amdgpu_device *adev = 
ddev->dev_private; uint32_t value = 0; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); else if (adev->powerplay.pp_funcs->get_sclk_od) @@ -1150,6 +1198,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, int ret; long int value; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + ret = kstrtol(buf, 0, &value); if (ret) { @@ -1183,6 +1234,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint32_t value = 0; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); else if (adev->powerplay.pp_funcs->get_mclk_od) @@ -1201,6 +1255,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, int ret; long int value; + if (amdgpu_sriov_vf(adev)) + return 0; + ret = kstrtol(buf, 0, &value); if (ret) { @@ -1253,6 +1310,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_get_power_profile_mode(&adev->smu, buf); else if (adev->powerplay.pp_funcs->get_power_profile_mode) @@ -1285,6 +1345,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, if (ret) goto fail; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { if (count < 2 || count > 127) return -EINVAL; @@ -1331,6 +1394,9 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int r, value, size = sizeof(value); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + /* read the IP busy sensor */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size); @@ -1357,6 +1423,9 @@ static ssize_t amdgpu_get_memory_busy_percent(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int r, value, size = sizeof(value); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + /* read the IP busy sensor */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size); @@ -1387,6 +1456,9 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint64_t count0, count1; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + amdgpu_asic_get_pcie_usage(adev, &count0, &count1); return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", count0, count1, pcie_get_mps(adev->pdev)); @@ -1409,6 +1481,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (adev->unique_id) return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); @@ -1603,6 +1678,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); u32 pwm_mode = 0; + if (is_support_sw_smu(adev)) { pwm_mode = smu_get_fan_control_mode(&adev->smu); } else { @@ -2053,6 +2129,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, int err; u32 value; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + err = kstrtou32(buf, 10, &value); if (err) return err; @@ -2299,6 +2378,23 @@ static umode_t 
hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; + /* under multi-vf mode, the hwmon attributes are all not supported */ + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + + /* there is no fan under pp one vf mode */ + if (amdgpu_sriov_is_pp_one_vf(adev) && + (attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || + attr == &sensor_dev_attr_fan1_input.dev_attr.attr || + attr == &sensor_dev_attr_fan1_min.dev_attr.attr || + attr == &sensor_dev_attr_fan1_max.dev_attr.attr || + attr == &sensor_dev_attr_fan1_target.dev_attr.attr || + attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) + return 0; + /* Skip fan attributes if fan is not present */ if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || @@ -2730,44 +2826,6 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) } } -int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev) -{ - int ret = 0; - - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) - return ret; - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) { - DRM_ERROR("failed to create device file for dpm state\n"); - return ret; - } - - return ret; -} - -void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev) -{ - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) - return; - - device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); -} - int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e32ae906d797..103033f96f13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -379,54 +379,3 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) } } } - -static uint32_t parse_clk(char *buf, bool min) -{ - char *ptr = buf; - uint32_t clk = 0; - - do { - ptr = strchr(ptr, ':'); - if (!ptr) - break; - ptr+=2; - if (kstrtou32(ptr, 10, &clk)) - return 0; - } while (!min); - - return clk * 100; -} - -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) -{ - char *buf = NULL; - uint32_t clk = 0; - - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); - clk = parse_clk(buf, lowest); - - kfree(buf); - - return clk; -} - -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) -{ - char *buf = NULL; - uint32_t clk = 0; - - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); - clk = parse_clk(buf, lowest); - - kfree(buf); - - return clk; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b0b2bdc750df..4d1ac7612967 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -57,8 +57,6 @@ struct amdgpu_virt_ops { int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); - int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); - int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); }; /* @@ -85,8 +83,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, /* VRAM LOST by GIM */ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, - /* HW PERF SIM in GIM */ - AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3), + /* PP ONE VF MODE in GIM */ + AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), }; struct amd_sriov_msg_pf2vf_info_header { @@ -257,8 +255,6 @@ struct amdgpu_virt { struct amdgpu_vf_error_buffer vf_errors; struct amdgpu_virt_fw_reserve fw_reserve; uint32_t gim_feature; - /* protect DPM events to GIM */ - struct mutex dpm_mutex; uint32_t reg_access_mode; }; @@ -286,8 +282,8 @@ static inline bool is_virtual_machine(void) #endif } -#define amdgim_is_hwperf(adev) \ - ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION) +#define amdgpu_sriov_is_pp_one_vf(adev) \ + ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); @@ -306,6 +302,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, unsigned int key, unsigned int chksum); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index cc5bf595f9b1..43305afa3d6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -158,82 +158,6 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_ai_mailbox_set_valid(adev, false); } -static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf) -{ - int r = 0; - u32 req, val, size; - - if (!amdgim_is_hwperf(adev) || buf == NULL) - return -EBADRQC; - - switch(type) { - case PP_SCLK: - req = IDH_IRQ_GET_PP_SCLK; - break; - case PP_MCLK: - req = IDH_IRQ_GET_PP_MCLK; - break; - default: - return -EBADRQC; - } - - mutex_lock(&adev->virt.dpm_mutex); - - xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); - - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); - if (!r && adev->fw_vram_usage.va != NULL) { - val = RREG32_NO_KIQ( - SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1)); - size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + - val), PAGE_SIZE); - - if (size < PAGE_SIZE) - strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val)); - else - size = 0; - - r = size; - goto out; - } - - r = xgpu_ai_poll_msg(adev, IDH_FAIL); - if(r) - pr_info("%s DPM request failed", - (type == PP_SCLK)? 
"SCLK" : "MCLK"); - -out: - mutex_unlock(&adev->virt.dpm_mutex); - return r; -} - -static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level) -{ - int r = 0; - u32 req = IDH_IRQ_FORCE_DPM_LEVEL; - - if (!amdgim_is_hwperf(adev)) - return -EBADRQC; - - mutex_lock(&adev->virt.dpm_mutex); - xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0); - - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); - if (!r) - goto out; - - r = xgpu_ai_poll_msg(adev, IDH_FAIL); - if (!r) - pr_info("DPM request failed"); - else - pr_info("Mailbox is broken"); - -out: - mutex_unlock(&adev->virt.dpm_mutex); - return r; -} - static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, enum idh_request req) { @@ -455,6 +379,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .reset_gpu = xgpu_ai_request_reset, .wait_reset = NULL, .trans_msg = xgpu_ai_mailbox_trans_msg, - .get_pp_clk = xgpu_ai_get_pp_clk, - .force_dpm_level = xgpu_ai_force_dpm_level, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 077e91a33d62..37dbe0f2142f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -35,10 +35,6 @@ enum idh_request { IDH_REL_GPU_FINI_ACCESS, IDH_REQ_GPU_RESET_ACCESS, - IDH_IRQ_FORCE_DPM_LEVEL = 10, - IDH_IRQ_GET_PP_SCLK, - IDH_IRQ_GET_PP_MCLK, - IDH_LOG_VF_ERROR = 200, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 09dca499a880..5bd6ae7a52fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -775,11 +775,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) } amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (!amdgpu_sriov_vf(adev)) { - if (is_support_sw_smu(adev)) + if (is_support_sw_smu(adev)) { + if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - else - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + } else { + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); } if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 7932eb163a00..5087d6bdba60 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -48,7 +48,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->adev = adev; hwmgr->not_vf = !amdgpu_sriov_vf(adev); - hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? 
true : false; hwmgr->device = amdgpu_cgs_create_device(adev); mutex_init(&hwmgr->smu_lock); hwmgr->chip_family = adev->family; @@ -276,6 +275,9 @@ static int pp_dpm_load_fw(void *handle) { struct pp_hwmgr *hwmgr = handle; + if (!hwmgr->not_vf) + return 0; + if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index cc57fb953e62..253860d30b20 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -81,8 +81,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) adev = hwmgr->adev; /* Skip for suspend/resume case */ - if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev) - && adev->in_suspend) { + if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr) + && !amdgpu_passthrough(adev) && adev->in_suspend) { pr_info("dpm has been enabled\n"); return 0; } @@ -200,6 +200,9 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); + if (!hwmgr->not_vf) + return 0; + if (hwmgr->hwmgr_func->stop_thermal_controller == NULL) return -EINVAL; @@ -237,6 +240,9 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) TEMP_RANGE_MAX}; struct amdgpu_device *adev = hwmgr->adev; + if (!hwmgr->not_vf) + return 0; + if (hwmgr->hwmgr_func->get_thermal_temperature_range) hwmgr->hwmgr_func->get_thermal_temperature_range( hwmgr, &range); @@ -263,6 +269,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); + if (hwmgr->pp_one_vf) + return false; if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) return false; @@ -482,6 +490,9 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); + if (!hwmgr->not_vf) + return 0; + if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index d2909c91d65b..e2b82c902948 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -221,6 +221,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) { int ret = 0; + hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev); + hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf)) + ? 
true : false; if (!hwmgr->pm_en) return 0; @@ -279,6 +282,9 @@ err: int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) { + if (!hwmgr->not_vf) + return 0; + if (!hwmgr || !hwmgr->pm_en) return 0; @@ -299,6 +305,9 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr) { int ret = 0; + if (!hwmgr->not_vf) + return 0; + if (!hwmgr || !hwmgr->pm_en) return 0; @@ -318,6 +327,9 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr) { int ret = 0; + if (!hwmgr->not_vf) + return 0; + if (!hwmgr) return -EINVAL; @@ -365,6 +377,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: + if (!hwmgr->not_vf) + return ret; ret = phm_pre_display_configuration_changed(hwmgr); if (ret) return ret; @@ -381,6 +395,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, enum PP_StateUILabel requested_ui_label; struct pp_power_state *requested_ps = NULL; + if (!hwmgr->not_vf) + return ret; if (user_state == NULL) { ret = -EINVAL; break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 6bf48934fdc4..31a32a79cfc2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -262,20 +262,22 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set uint32_t index; long workload; - if (!skip_display_settings) - phm_display_configuration_changed(hwmgr); - - if (hwmgr->ps) - power_state_management(hwmgr, new_ps); - else - /* - * for vega12/vega20 which does not support power state manager - * DAL clock limits should also be honoured - */ - phm_apply_clock_adjust_rules(hwmgr); - - if (!skip_display_settings) - phm_notify_smc_display_config_after_ps_adjustment(hwmgr); + if (hwmgr->not_vf) { + if (!skip_display_settings) + phm_display_configuration_changed(hwmgr); + + if (hwmgr->ps) + power_state_management(hwmgr, new_ps); + else + /* + * for vega12/vega20 which does not support power state manager + * DAL clock limits should also be honoured + */ + phm_apply_clock_adjust_rules(hwmgr); + + if (!skip_display_settings) + phm_notify_smc_display_config_after_ps_adjustment(hwmgr); + } if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) hwmgr->dpm_level = hwmgr->request_dpm_level; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 4685193eca87..148446570e21 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -912,6 +912,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.clockStep.memoryClock = 500; data->total_active_cus = adev->gfx.cu_info.number; + if (!hwmgr->not_vf) + return result; + /* Setup default Overdrive Fan control settings */ data->odn_fan_table.target_fan_speed = hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; @@ -979,6 +982,9 @@ static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr) static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr) { + if (!hwmgr->not_vf) + return 0; + PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr), "Failed to init sclk threshold!", return -EINVAL); @@ -2503,6 +2509,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to setup default DPM tables!", return result); + if (!hwmgr->not_vf) + return 0; + /* initialize ODN table */ if (hwmgr->od_enabled) { if (odn_table->max_vddc) { @@ -2826,6 +2835,8 @@ static int vega10_stop_dpm(struct pp_hwmgr 
*hwmgr, uint32_t bitmap) struct vega10_hwmgr *data = hwmgr->backend; uint32_t i, feature_mask = 0; + if (!hwmgr->not_vf) + return 0; if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, @@ -2932,61 +2943,73 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = hwmgr->backend; int tmp_result, result = 0; - vega10_enable_disable_PCC_limit_feature(hwmgr, true); - - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); + if (hwmgr->not_vf) { + vega10_enable_disable_PCC_limit_feature(hwmgr, true); - tmp_result = vega10_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to construct voltage tables!", - result = tmp_result); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); - tmp_result = vega10_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to initialize SMC table!", - result = tmp_result); + tmp_result = vega10_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to construct voltage tables!", + result = tmp_result); + } - if (PP_CAP(PHM_PlatformCaps_ThermalController)) { - tmp_result = vega10_enable_thermal_protection(hwmgr); + if (hwmgr->not_vf || hwmgr->pp_one_vf) { + tmp_result = vega10_init_smc_table(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable thermal protection!", - result = tmp_result); + "Failed to initialize SMC table!", + result = tmp_result); } - tmp_result = vega10_enable_vrhot_feature(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable VR hot feature!", - result = tmp_result); + if (hwmgr->not_vf) { + if (PP_CAP(PHM_PlatformCaps_ThermalController)) { + tmp_result = vega10_enable_thermal_protection(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable thermal protection!", + result = tmp_result); + } - tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable deep sleep master switch!", - result = tmp_result); + tmp_result = vega10_enable_vrhot_feature(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable VR hot feature!", + result = tmp_result); - tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to start DPM!", result = tmp_result); + tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable deep sleep master switch!", + result = tmp_result); + } + + if (hwmgr->not_vf) { + tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to start DPM!", result = tmp_result); + } - /* enable didt, do not abort if failed didt */ - tmp_result = vega10_enable_didt_config(hwmgr); - PP_ASSERT(!tmp_result, - "Failed to enable didt config!"); + if (hwmgr->not_vf) { + /* enable didt, do not abort if failed didt */ + tmp_result = vega10_enable_didt_config(hwmgr); + PP_ASSERT(!tmp_result, + "Failed to enable didt config!"); + } tmp_result = vega10_enable_power_containment(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable power containment!", - result = tmp_result); + "Failed to enable power containment!", + result = tmp_result); - tmp_result = vega10_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to power control set level!", - result = tmp_result); + if (hwmgr->not_vf) { + tmp_result = vega10_power_control_set_level(hwmgr); + 
PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to power control set level!", + result = tmp_result); - tmp_result = vega10_enable_ulv(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable ULV!", - result = tmp_result); + tmp_result = vega10_enable_ulv(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable ULV!", + result = tmp_result); + } return result; } @@ -3080,11 +3103,22 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, performance_level->soc_clock = socclk_dep_table->entries [state_entry->ucSocClockIndexHigh].ulClk; if (gfxclk_dep_table->ucRevId == 0) { - performance_level->gfx_clock = gfxclk_dep_table->entries - [state_entry->ucGfxClockIndexHigh].ulClk; + /* under vega10 pp one vf mode, the gfx clk dpm need be lower + * to level-4 due to the limited 110w-power + */ + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) + performance_level->gfx_clock = + gfxclk_dep_table->entries[4].ulClk; + else + performance_level->gfx_clock = gfxclk_dep_table->entries + [state_entry->ucGfxClockIndexHigh].ulClk; } else if (gfxclk_dep_table->ucRevId == 1) { patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; - performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) + performance_level->gfx_clock = patom_record_V2[4].ulClk; + else + performance_level->gfx_clock = + patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; } performance_level->mem_clock = mclk_dep_table->entries @@ -3495,6 +3529,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinGfxclkByIndex, data->smc_state_table.gfx_boot_level); + data->dpm_table.gfx_table.dpm_state.soft_min_level = data->smc_state_table.gfx_boot_level; } @@ -3518,6 +3553,9 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) } } + if (!hwmgr->not_vf) + return 0; + if (!data->registry_data.socclk_dpm_key_disabled) { if (data->smc_state_table.soc_boot_level != data->dpm_table.soc_table.dpm_state.soft_min_level) { @@ -3560,6 +3598,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) } } + if (!hwmgr->not_vf) + return 0; + if (!data->registry_data.socclk_dpm_key_disabled) { if (data->smc_state_table.soc_max_level != data->dpm_table.soc_table.dpm_state.soft_max_level) { @@ -4054,15 +4095,25 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { *mclk_mask = 0; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { - *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; + /* under vega10 pp one vf mode, the gfx clk dpm need be lower + * to level-4 due to the limited power + */ + if (hwmgr->pp_one_vf) + *sclk_mask = 4; + else + *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; *soc_mask = table_info->vdd_dep_on_socclk->count - 1; *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; } + return 0; } static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { + if (!hwmgr->not_vf) + return; + switch (mode) { case AMD_FAN_CTRL_NONE: vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); @@ -4176,6 +4227,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, break; } + if (!hwmgr->not_vf) + return ret; + if (!ret) { if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) vega10_set_fan_control_mode(hwmgr, 
AMD_FAN_CTRL_NONE); @@ -4479,7 +4533,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; - int i, now, size = 0; + int i, now, size = 0, count = 0; switch (type) { case PP_SCLK: @@ -4489,7 +4543,12 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); now = smum_get_argument(hwmgr); - for (i = 0; i < sclk_table->count; i++) + if (hwmgr->pp_one_vf && + (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) + count = 5; + else + count = sclk_table->count; + for (i = 0; i < count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); @@ -4700,6 +4759,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; + if (!hwmgr->not_vf) + return 0; + if (PP_CAP(PHM_PlatformCaps_ThermalController)) vega10_disable_thermal_protection(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 6f26cb241ecc..0a677d4bc87b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -1343,6 +1343,9 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr) hwmgr->default_power_limit = hwmgr->power_limit = (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit); + if (!hwmgr->not_vf) + return 0; + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index af977675fd33..2ffb666b97e6 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -741,6 +741,7 @@ struct pp_hwmgr { uint32_t smu_version; bool not_vf; bool pm_en; + bool pp_one_vf; struct mutex smu_lock; uint32_t pp_table_version; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c index 742b3dc1f6cb..adfbcbe5d113 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c @@ -61,15 +61,29 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr) uint32_t reg; uint32_t ret; - reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); + /* Due to the L1 policy problem under SRIOV, we have to use + * mmMP1_SMN_C2PMSG_103 as the driver response register + */ + if (hwmgr->pp_one_vf) { + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103); - ret = phm_wait_for_register_unequal(hwmgr, reg, - 0, MP1_C2PMSG_90__CONTENT_MASK); + ret = phm_wait_for_register_unequal(hwmgr, reg, + 0, MP1_C2PMSG_103__CONTENT_MASK); - if (ret) - pr_err("No response from smu\n"); + if (ret) + pr_err("No response from smu\n"); - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103); + } else { + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); + + ret = phm_wait_for_register_unequal(hwmgr, reg, + 0, MP1_C2PMSG_90__CONTENT_MASK); + + if (ret) + pr_err("No response from smu\n"); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); + } } /* @@ -83,7 +97,11 @@ static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, { struct amdgpu_device *adev = hwmgr->adev; - WREG32_SOC15(MP1, 0, 
mmMP1_SMN_C2PMSG_66, msg); + if (hwmgr->pp_one_vf) { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg); + } else { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); + } return 0; } @@ -101,7 +119,10 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) smu9_wait_for_response(hwmgr); - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + if (hwmgr->pp_one_vf) + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); + else + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); smu9_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -127,9 +148,17 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, smu9_wait_for_response(hwmgr); - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); + /* Due to the L1 policy problem under SRIOV, we have to use + * mmMP1_SMN_C2PMSG_101 as the driver message register and + * mmMP1_SMN_C2PMSG_102 as the driver parameter register. + */ + if (hwmgr->pp_one_vf) { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter); + } else { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); + } smu9_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -144,5 +173,8 @@ uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); + if (hwmgr->pp_one_vf) + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102); + else + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 0f3836fd9666..39427ca32a15 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -71,6 +71,12 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, { struct vega10_smumgr *priv = hwmgr->smu_backend; + /* under sriov, vbios or hypervisor driver + * has already copy table to smc so here only skip it + */ + if (!hwmgr->not_vf) + return 0; + PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, @@ -100,6 +106,14 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, int msg = enable ? PPSMC_MSG_EnableSmuFeatures : PPSMC_MSG_DisableSmuFeatures; + /* VF has no permission to change smu feature due + * to security concern even under pp one vf mode + * it still can't do it. For vega10, the smu in + * vbios will enable the appropriate features. 
+ * */ + if (!hwmgr->not_vf) + return 0; + return smum_send_msg_to_smc_with_parameter(hwmgr, msg, feature_mask); } -- cgit From d4117354c8d3dbf754a4706a808fb9e45ce01168 Mon Sep 17 00:00:00 2001 From: "Tianci.Yin" Date: Wed, 11 Dec 2019 10:43:07 +0800 Subject: drm/amdgpu/gfx10: update gfx golden settings add registers: mmSPI_CONFIG_CNTL Reviewed-by: Feifei Xu Signed-off-by: Tianci.Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 5744b212db86..d96eabca9538 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -118,6 +118,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), -- cgit From 5f5202bf695b344b4c13de3609b7c0805f89b898 Mon Sep 17 00:00:00 2001 From: "Tianci.Yin" Date: Wed, 11 Dec 2019 10:52:14 +0800 Subject: drm/amdgpu/gfx10: update gfx golden settings for navi14 add registers: mmSPI_CONFIG_CNTL Reviewed-by: Feifei Xu Signed-off-by: Tianci.Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d96eabca9538..caa43b3a3dbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -164,6 +164,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), -- cgit From d549991ce5d5194d89eafe16085603b38e7cf989 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Wed, 11 Dec 2019 17:30:26 +0800 Subject: drm/amdgpu: enable gfxoff feature for navi10 asic enable gfxoff feature for some navi10 asics Signed-off-by: Kevin Wang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index caa43b3a3dbf..5f54b6dac6f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -610,11 +610,29 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev) le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); } +static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) +{ + bool 
ret = false; + + switch (adev->pdev->revision) { + case 0xc2: + case 0xc3: + ret = true; + break; + default: + ret = false; + break; + } + + return ret ; +} + static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_NAVI10: - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; break; default: break; -- cgit From eaec03f20663b30440dd8301202aba1eaf45d996 Mon Sep 17 00:00:00 2001 From: "Tianci.Yin" Date: Wed, 11 Dec 2019 19:55:49 +0800 Subject: drm/amdgpu/gfx10: update gfx golden settings add registers: mmPA_SC_BINNER_TIMEOUT_COUNTER and mmPA_SC_ENHANCE_2 Reviewed-by: Feifei Xu Signed-off-by: Tianci.Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 5f54b6dac6f7..558b998b3da5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -114,8 +114,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104), -- cgit From 89ed5a52112cf46dc495a6cd4ddff81c029e1f75 Mon Sep 17 00:00:00 2001 From: "Tianci.Yin" Date: Wed, 11 Dec 2019 19:57:43 +0800 Subject: drm/amdgpu/gfx10: update gfx golden settings for navi14 add registers: mmPA_SC_BINNER_TIMEOUT_COUNTER and mmPA_SC_ENHANCE_2 Reviewed-by: Feifei Xu Signed-off-by: Tianci.Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 558b998b3da5..98db25215d0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -162,8 +162,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105), -- cgit From 21a174f5adb35cbc20b1a86c84ae0da21ae77219 Mon Sep 17 00:00:00 2001 
From: Leo Liu Date: Mon, 9 Dec 2019 12:43:47 -0500 Subject: drm/amdgpu: fix VCN2.x number of irq types The JPEG irq type has been moved to its own structure Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Reviewed-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 5649190cb629..d76ece38c97b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1788,7 +1788,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = { static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 42d6b9f0553b..f67fca38c1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -1138,7 +1138,7 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; } } -- cgit From 3504bd45a99eae374ae54327bf7a6ebf1c8de15a Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 9 Dec 2019 12:58:24 -0500 Subject: drm/amdgpu: fix JPEG instance checking when ctx init Use proper structure. Fixes: 0388aee766376ed ("drm/amdgpu: use the JPEG structure for general driver support") Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Reviewed-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 8f2eea92d67c..1d2bbf10614e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -170,7 +170,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, break; case AMDGPU_HW_IP_VCN_JPEG: for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) { - if (adev->vcn.harvest_config & (1 << j)) + if (adev->jpeg.harvest_config & (1 << j)) continue; rings[num_rings++] = &adev->jpeg.inst[j].ring_dec; } -- cgit From aec434023e528bdc8a914458fc729c62eb5e6139 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 10 Dec 2019 20:59:24 +0100 Subject: drm/amd/display: include linux/slab.h where needed Calling kzalloc() and related functions requires the linux/slab.h header to be included: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn21/dcn21_resource.c: In function 'dcn21_ipp_create': drivers/gpu/drm/amd/amdgpu/../display/dc/dcn21/dcn21_resource.c:679:3: error: implicit declaration of function 'kzalloc'; did you mean 'd_alloc'? [-Werror=implicit-function-declaration] kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); A lot of other headers also miss a direct include in this file, but this is the only one that causes a problem for now. 
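As a minimal, self-contained illustration of the requirement (the structure and function names here are hypothetical stand-ins for the DCN types, not the driver code itself): any translation unit that calls the slab allocators needs linux/slab.h directly, rather than relying on it arriving through another header.

#include <linux/slab.h>	/* kzalloc(), kfree() */

/* hypothetical stand-in for a DCN object such as struct dcn10_ipp */
struct example_ipp {
	int inst;
};

static struct example_ipp *example_ipp_create(int inst)
{
	/* kzalloc() is declared in linux/slab.h; without that include the
	 * compiler only sees an implicit declaration and the build fails.
	 */
	struct example_ipp *ipp = kzalloc(sizeof(*ipp), GFP_KERNEL);

	if (!ipp)
		return NULL;

	ipp->inst = inst;
	return ipp;
}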
Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 8fa63929d3b9..c865b95d5c0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -23,6 +23,8 @@ * */ +#include + #include "dm_services.h" #include "dc.h" -- cgit From 93b09a9a892ba07bd99132d9bb6318e31c7f2f79 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Wed, 11 Dec 2019 10:21:01 +0000 Subject: drm/amdgpu: log when amdgpu.dc=1 but ASIC is unsupported This makes it easier to figure out whether the kernel parameter has been taken into account. Signed-off-by: Simon Ser Cc: Harry Wentland Cc: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 95988ff7356d..a97946878024 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2636,6 +2636,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) return amdgpu_dc != 0; #endif default: + if (amdgpu_dc > 0) + DRM_INFO("Display Core has been requested via kernel parameter " + "but isn't supported by ASIC, ignoring\n"); return false; } } -- cgit From ad808910be68dcf8da5d837d4511d00ad5d3678a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 11 Dec 2019 10:09:03 -0500 Subject: drm/amdgpu: fix license on Kconfig and Makefiles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amdgpu is MIT licensed. 
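Concretely (shown only for reference, since the diff below applies the same one-line edit to each file), the first line of every affected Kconfig and Makefile becomes the MIT identifier instead of the GPL-2.0-only tag added by the treewide commit:

# SPDX-License-Identifier: MIT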
Fixes: ec8f24b7faaf3d ("treewide: Add SPDX license identifier - Makefile/Kconfig") Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/acp/Kconfig | 2 +- drivers/gpu/drm/amd/amdgpu/Kconfig | 2 +- drivers/gpu/drm/amd/amdkfd/Kconfig | 2 +- drivers/gpu/drm/amd/display/Kconfig | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 1 + drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 1 + drivers/gpu/drm/amd/display/dc/dsc/Makefile | 1 + 7 files changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index 19bae9100da4..13340f353ea8 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT menu "ACP (Audio CoProcessor) Configuration" config DRM_AMD_ACP diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 2e98c016cb47..9375e7f12420 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT config DRM_AMDGPU_SI bool "Enable amdgpu support for SI parts" depends on DRM_AMDGPU diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index ba0e68057a89..b3672d10ea54 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT # # Heterogenous system architecture configuration # diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 78f40690a109..096db863c345 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT menu "Display Engine Configuration" depends on DRM && DRM_AMDGPU diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 1f94b8bc68b8..bb8a3b92d024 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: MIT # # Makefile for DCN. diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index cb839b0adb9c..f8fb37278717 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: MIT # # Makefile for DCN21. diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index b456cd23c6fa..afc76002eacc 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. -- cgit
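A note on the pp one vf handshake introduced earlier in this series: under SRIOV the VF now drives the SMU directly through mmMP1_SMN_C2PMSG_101 (message), mmMP1_SMN_C2PMSG_102 (parameter) and mmMP1_SMN_C2PMSG_103 (response) instead of the _66/_82/_90 set used on bare metal. A condensed sketch of that flow, modeled on smu9_send_msg_to_smc_with_parameter() but with a hypothetical function name and the error reporting trimmed, looks like this:

static int example_pp_one_vf_send_msg(struct pp_hwmgr *hwmgr,
				      uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;	/* used by WREG32_SOC15() */

	/* wait until the SMU has acknowledged the previous message */
	phm_wait_for_register_unequal(hwmgr,
			SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103),
			0, MP1_C2PMSG_103__CONTENT_MASK);

	/* clear the response register, then post the argument and the message */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg);

	/* block until the SMU writes its response back into _103 */
	return phm_wait_for_register_unequal(hwmgr,
			SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103),
			0, MP1_C2PMSG_103__CONTENT_MASK);
}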