Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_dmc.c')
-rw-r--r-- | drivers/gpu/drm/i915/display/intel_dmc.c | 531 |
1 file changed, 403 insertions, 128 deletions
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index b58189d24e7e..744f51c0eab8 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -24,14 +24,22 @@ #include <linux/debugfs.h> #include <linux/firmware.h> +#include <drm/drm_vblank.h> + +#include <drm/drm_file.h> +#include <drm/drm_print.h> -#include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" +#include "intel_crtc.h" #include "intel_de.h" -#include "intel_display_rpm.h" #include "intel_display_power_well.h" +#include "intel_display_regs.h" +#include "intel_display_rpm.h" +#include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" +#include "intel_flipq.h" #include "intel_step.h" /** @@ -176,7 +184,8 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size const char *fw_path = NULL; u32 max_fw_size = 0; - if (DISPLAY_VERx100(display) == 3000) { + if (DISPLAY_VERx100(display) == 3002 || + DISPLAY_VERx100(display) == 3000) { fw_path = XE3LPD_DMC_PATH; max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; } else if (DISPLAY_VERx100(display) == 2000) { @@ -425,29 +434,26 @@ static void disable_event_handler(struct intel_display *display, REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, DMC_EVT_CTL_TYPE_EDGE_0_1) | REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, - DMC_EVT_CTL_EVENT_ID_FALSE)); + DMC_EVENT_FALSE)); intel_de_write(display, htp_reg, 0); } -static void disable_all_event_handlers(struct intel_display *display) +static void disable_all_event_handlers(struct intel_display *display, + enum intel_dmc_id dmc_id) { - enum intel_dmc_id dmc_id; + int handler; /* TODO: disable the event handlers on pre-GEN12 platforms as well */ if (DISPLAY_VER(display) < 12) return; - for_each_dmc_id(dmc_id) { - int handler; - - if (!has_dmc_id_fw(display, dmc_id)) - continue; + if (!has_dmc_id_fw(display, dmc_id)) + return; - for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) - disable_event_handler(display, - DMC_EVT_CTL(display, dmc_id, handler), - DMC_EVT_HTP(display, dmc_id, handler)); - } + for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) + disable_event_handler(display, + DMC_EVT_CTL(display, dmc_id, handler), + DMC_EVT_HTP(display, dmc_id, handler)); } static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable) @@ -479,89 +485,36 @@ static void mtl_pipedmc_clock_gating_wa(struct intel_display *display) * for pipe A and B. 
*/ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, - MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); + MTL_PIPEDMC_GATING_DIS(PIPE_A) | + MTL_PIPEDMC_GATING_DIS(PIPE_B)); } static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable) { - if (DISPLAY_VER(display) >= 14 && enable) + if (display->platform.meteorlake && enable) mtl_pipedmc_clock_gating_wa(display); else if (DISPLAY_VER(display) == 13) adlp_pipedmc_clock_gating_wa(display, enable); } -void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe) +static u32 pipedmc_interrupt_mask(struct intel_display *display) { - enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) - return; - - if (DISPLAY_VER(display) >= 14) - intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); - else - intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); -} - -void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe) -{ - enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) - return; - - if (DISPLAY_VER(display) >= 14) - intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); - else - intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); -} - -/** - * intel_dmc_block_pkgc() - block PKG C-state - * @display: display instance - * @pipe: pipe which register use to block - * @block: block/unblock - * - * This interface is target for Wa_16025596647 usage. I.e. to set/clear - * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in PIPEDMC_BLOCK_PKGC_SW register. - */ -void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe, - bool block) -{ - intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe), - PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ? - PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0); + /* + * FIXME PIPEDMC_ERROR not enabled for now due to LNL pipe B + * triggering it during the first DC state transition. Figure + * out what is going on... + */ + return PIPEDMC_FLIPQ_PROG_DONE | + PIPEDMC_GTT_FAULT | + PIPEDMC_ATS_FAULT; } -/** - * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG - * C-state exit - * @display: display instance - * @pipe: pipe which register use to block - * @enable: enable/disable - * - * This interface is target for Wa_16025596647 usage. I.e. 
start the package C - * exit at the start of the undelayed vblank - */ -void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display, - enum pipe pipe, bool enable) +static u32 dmc_evt_ctl_disable(void) { - u32 val; - - if (enable) - val = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING | - REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, - DMC_EVT_CTL_TYPE_EDGE_0_1) | - REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, - DMC_EVT_CTL_EVENT_ID_VBLANK_A); - else - val = REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, - DMC_EVT_CTL_EVENT_ID_FALSE) | - REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, - DMC_EVT_CTL_TYPE_EDGE_0_1); - - intel_de_write(display, MTL_PIPEDMC_EVT_CTL_4(pipe), - val); + return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, + DMC_EVT_CTL_TYPE_EDGE_0_1) | + REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, + DMC_EVENT_FALSE); } static bool is_dmc_evt_ctl_reg(struct intel_display *display, @@ -584,6 +537,15 @@ static bool is_dmc_evt_htp_reg(struct intel_display *display, return offset >= start && offset < end; } +static bool is_event_handler(struct intel_display *display, + enum intel_dmc_id dmc_id, + unsigned int event_id, + i915_reg_t reg, u32 data) +{ + return is_dmc_evt_ctl_reg(display, dmc_id, reg) && + REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id; +} + static bool disable_dmc_evt(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg, u32 data) @@ -597,12 +559,12 @@ static bool disable_dmc_evt(struct intel_display *display, /* also disable the flip queue event on the main DMC on TGL */ if (display->platform.tigerlake && - REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC) + is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data)) return true; /* also disable the HRR event on the main DMC on TGL/ADLS */ if ((display->platform.tigerlake || display->platform.alderlake_s) && - REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A) + is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data)) return true; return false; @@ -615,14 +577,267 @@ static u32 dmc_mmiodata(struct intel_display *display, if (disable_dmc_evt(display, dmc_id, dmc->dmc_info[dmc_id].mmioaddr[i], dmc->dmc_info[dmc_id].mmiodata[i])) - return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, - DMC_EVT_CTL_TYPE_EDGE_0_1) | - REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, - DMC_EVT_CTL_EVENT_ID_FALSE); + return dmc_evt_ctl_disable(); else return dmc->dmc_info[dmc_id].mmiodata[i]; } +static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = display_to_dmc(display); + int i; + + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { + intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i], + dmc_mmiodata(display, dmc, dmc_id, i)); + } +} + +static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = display_to_dmc(display); + int i; + + disable_all_event_handlers(display, dmc_id); + + preempt_disable(); + + for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { + intel_de_write_fw(display, + DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), + dmc->dmc_info[dmc_id].payload[i]); + } + + preempt_enable(); + + dmc_load_mmio(display, dmc_id); +} + +static void assert_dmc_loaded(struct intel_display *display, + enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = display_to_dmc(display); + u32 expected, found; + int i; + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) + return; + + found = 
intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0)); + expected = dmc->dmc_info[dmc_id].payload[0]; + + drm_WARN(display->drm, found != expected, + "DMC %d program storage start incorrect (expected 0x%x, current 0x%x)\n", + dmc_id, expected, found); + + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { + i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i]; + + found = intel_de_read(display, reg); + expected = dmc_mmiodata(display, dmc, dmc_id, i); + + /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */ + if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) { + found &= ~DMC_EVT_CTL_ENABLE; + expected &= ~DMC_EVT_CTL_ENABLE; + } + + drm_WARN(display->drm, found != expected, + "DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n", + dmc_id, i, i915_mmio_reg_offset(reg), expected, found); + } +} + +void assert_main_dmc_loaded(struct intel_display *display) +{ + assert_dmc_loaded(display, DMC_FW_MAIN); +} + +static bool need_pipedmc_load_program(struct intel_display *display) +{ + /* On TGL/derivatives pipe DMC state is lost when PG1 is disabled */ + return DISPLAY_VER(display) == 12; +} + +static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe) +{ + /* + * PTL: + * - pipe A/B DMC doesn't need save/restore + * - pipe C/D DMC is in PG0, needs manual save/restore + */ + if (DISPLAY_VER(display) == 30) + return pipe >= PIPE_C; + + /* + * FIXME LNL unclear, main DMC firmware has the pipe DMC A/B PG0 + * save/restore, but so far unable to see the loss of pipe DMC state + * in action. Are we just failing to turn off PG0 due to some other + * SoC level stuff? + */ + if (DISPLAY_VER(display) == 20) + return false; + + /* + * FIXME BMG untested, main DMC firmware has the + * pipe DMC A/B PG0 save/restore... + */ + if (display->platform.battlemage) + return false; + + /* + * DG2: + * - Pipe DMCs presumably in PG0? + * - No DC6, and even DC9 doesn't seem to result + * in loss of DMC state for whatever reason + */ + if (display->platform.dg2) + return false; + + /* + * ADL/MTL: + * - pipe A/B DMC is in PG0, saved/restored by the main DMC + * - pipe C/D DMC is in PG0, needs manual save/restore + */ + if (IS_DISPLAY_VER(display, 13, 14)) + return pipe >= PIPE_C; + + return false; +} + +static bool can_enable_pipedmc(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + /* + * On TGL/derivatives pipe DMC state is lost when PG1 is disabled. + * Do not even enable the pipe DMC when that can happen outside + * of driver control (PSR+DC5/6). 
+ */ + if (DISPLAY_VER(display) == 12 && crtc_state->has_psr) + return false; + + return true; +} + +void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum pipe pipe = crtc->pipe; + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) + return; + + if (!can_enable_pipedmc(crtc_state)) { + intel_dmc_disable_pipe(crtc_state); + return; + } + + if (need_pipedmc_load_program(display)) + dmc_load_program(display, dmc_id); + else if (need_pipedmc_load_mmio(display, pipe)) + dmc_load_mmio(display, dmc_id); + + assert_dmc_loaded(display, dmc_id); + + if (DISPLAY_VER(display) >= 20) { + intel_flipq_reset(display, pipe); + + intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display)); + intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~pipedmc_interrupt_mask(display)); + } + + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); + else + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); +} + +void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum pipe pipe = crtc->pipe; + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) + return; + + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); + else + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); + + if (DISPLAY_VER(display) >= 20) { + intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~0); + intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display)); + + intel_flipq_reset(display, pipe); + } +} + +static void dmc_configure_event(struct intel_display *display, + enum intel_dmc_id dmc_id, + unsigned int event_id, + bool enable) +{ + struct intel_dmc *dmc = display_to_dmc(display); + int num_handlers = 0; + int i; + + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { + i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i]; + u32 data = dmc->dmc_info[dmc_id].mmiodata[i]; + + if (!is_event_handler(display, dmc_id, event_id, reg, data)) + continue; + + intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable()); + num_handlers++; + } + + drm_WARN_ONCE(display->drm, num_handlers != 1, + "DMC %d has %d handlers for event 0x%x\n", + dmc_id, num_handlers, event_id); +} + +/** + * intel_dmc_block_pkgc() - block PKG C-state + * @display: display instance + * @pipe: pipe which register use to block + * @block: block/unblock + * + * This interface is target for Wa_16025596647 usage. I.e. to set/clear + * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in PIPEDMC_BLOCK_PKGC_SW register. + */ +void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe, + bool block) +{ + intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe), + PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ? + PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0); +} + +/** + * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG + * C-state exit + * @display: display instance + * @pipe: pipe which register use to block + * @enable: enable/disable + * + * This interface is target for Wa_16025596647 usage. I.e. 
start the package C + * exit at the start of the undelayed vblank + */ +void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display, + enum pipe pipe, bool enable) +{ + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable); +} + /** * intel_dmc_load_program() - write the firmware from memory to register. * @display: display instance @@ -634,37 +849,26 @@ static u32 dmc_mmiodata(struct intel_display *display, void intel_dmc_load_program(struct intel_display *display) { struct i915_power_domains *power_domains = &display->power.domains; - struct intel_dmc *dmc = display_to_dmc(display); enum intel_dmc_id dmc_id; - u32 i; if (!intel_dmc_has_payload(display)) return; - pipedmc_clock_gating_wa(display, true); - - disable_all_event_handlers(display); - assert_display_rpm_held(display); - preempt_disable(); + pipedmc_clock_gating_wa(display, true); for_each_dmc_id(dmc_id) { - for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { - intel_de_write_fw(display, - DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), - dmc->dmc_info[dmc_id].payload[i]); - } + dmc_load_program(display, dmc_id); + assert_dmc_loaded(display, dmc_id); } - preempt_enable(); - - for_each_dmc_id(dmc_id) { - for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { - intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i], - dmc_mmiodata(display, dmc, dmc_id, i)); - } - } + if (DISPLAY_VER(display) >= 20) + intel_de_write(display, DMC_FQ_W2_PTS_CFG_SEL, + PIPE_D_DMC_W2_PTS_CONFIG_SELECT(PIPE_D) | + PIPE_C_DMC_W2_PTS_CONFIG_SELECT(PIPE_C) | + PIPE_B_DMC_W2_PTS_CONFIG_SELECT(PIPE_B) | + PIPE_A_DMC_W2_PTS_CONFIG_SELECT(PIPE_A)); power_domains->dc_state = 0; @@ -682,26 +886,17 @@ void intel_dmc_load_program(struct intel_display *display) */ void intel_dmc_disable_program(struct intel_display *display) { + enum intel_dmc_id dmc_id; + if (!intel_dmc_has_payload(display)) return; pipedmc_clock_gating_wa(display, true); - disable_all_event_handlers(display); - pipedmc_clock_gating_wa(display, false); -} -void assert_dmc_loaded(struct intel_display *display) -{ - struct intel_dmc *dmc = display_to_dmc(display); + for_each_dmc_id(dmc_id) + disable_all_event_handlers(display, dmc_id); - drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n"); - drm_WARN_ONCE(display->drm, dmc && - !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), - "DMC program storage start is NULL\n"); - drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE), - "DMC SSP Base Not fine\n"); - drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL), - "DMC HTP Not fine\n"); + pipedmc_clock_gating_wa(display, false); } static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info, @@ -1120,7 +1315,6 @@ out: */ void intel_dmc_init(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dmc *dmc; if (!HAS_DMC(display)) @@ -1163,7 +1357,7 @@ void intel_dmc_init(struct intel_display *display) display->dmc.dmc = dmc; drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path); - queue_work(i915->unordered_wq, &dmc->work); + queue_work(display->wq.unordered, &dmc->work); return; @@ -1194,6 +1388,17 @@ void intel_dmc_suspend(struct intel_display *display) intel_dmc_runtime_pm_put(display); } +void intel_dmc_wait_fw_load(struct intel_display *display) +{ + struct intel_dmc *dmc = display_to_dmc(display); + + if (!HAS_DMC(display)) + return; + + if (dmc) + 
flush_work(&dmc->work); +} + /** * intel_dmc_resume() - init DMC firmware during system resume * @display: display instance @@ -1403,3 +1608,73 @@ void intel_dmc_debugfs_register(struct intel_display *display) debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root, display, &intel_dmc_debugfs_status_fops); } + +void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); + u32 tmp = 0, int_vector; + + if (DISPLAY_VER(display) >= 20) { + tmp = intel_de_read(display, PIPEDMC_INTERRUPT(pipe)); + intel_de_write(display, PIPEDMC_INTERRUPT(pipe), tmp); + + if (tmp & PIPEDMC_FLIPQ_PROG_DONE) { + spin_lock(&display->drm->event_lock); + + if (crtc->flipq_event) { + /* + * Update vblank counter/timestamp in case it + * hasn't been done yet for this frame. + */ + drm_crtc_accurate_vblank_count(&crtc->base); + + drm_crtc_send_vblank_event(&crtc->base, crtc->flipq_event); + crtc->flipq_event = NULL; + } + + spin_unlock(&display->drm->event_lock); + } + + if (tmp & PIPEDMC_ATS_FAULT) + drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC ATS fault\n", + crtc->base.base.id, crtc->base.name); + if (tmp & PIPEDMC_GTT_FAULT) + drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n", + crtc->base.base.id, crtc->base.name); + if (tmp & PIPEDMC_ERROR) + drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC error\n", + crtc->base.base.id, crtc->base.name); + } + + int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK; + if (tmp == 0 && int_vector != 0) + drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC interrupt vector 0x%x\n", + crtc->base.base.id, crtc->base.name, tmp); +} + +void intel_pipedmc_enable_event(struct intel_crtc *crtc, + enum pipedmc_event_id event) +{ + struct intel_display *display = to_intel_display(crtc); + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe); + + dmc_configure_event(display, dmc_id, event, true); +} + +void intel_pipedmc_disable_event(struct intel_crtc *crtc, + enum pipedmc_event_id event) +{ + struct intel_display *display = to_intel_display(crtc); + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe); + + dmc_configure_event(display, dmc_id, event, false); +} + +u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + struct intel_dmc *dmc = display_to_dmc(display); + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe); + + return dmc ? dmc->dmc_info[dmc_id].start_mmioaddr : 0; +} |
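
The hunks above rework intel_dmc_enable_pipe()/intel_dmc_disable_pipe() to take a crtc_state instead of (display, pipe), and route per-pipe DMC event control (including the Wa_16025596647 vblank hook) through generic event-handler helpers. Below is a minimal caller-side sketch of the new entry points; the wrapper function and its call pattern are hypothetical, only the intel_dmc_*()/intel_pipedmc_*() prototypes and PIPEDMC_EVENT_VBLANK are taken from the diff:

/*
 * Hypothetical call site illustrating the reworked API from this patch.
 * The helper itself is made up for illustration; the intel_dmc_*() and
 * intel_pipedmc_*() signatures come from the hunks above.
 */
#include "intel_dmc.h"

static void example_pipe_dmc_update(const struct intel_crtc_state *new_crtc_state,
				    struct intel_crtc *crtc, bool enable)
{
	if (enable) {
		/* was: intel_dmc_enable_pipe(display, crtc->pipe); */
		intel_dmc_enable_pipe(new_crtc_state);

		/* per-event control now goes through the generic helpers */
		intel_pipedmc_enable_event(crtc, PIPEDMC_EVENT_VBLANK);
	} else {
		intel_pipedmc_disable_event(crtc, PIPEDMC_EVENT_VBLANK);

		/* was: intel_dmc_disable_pipe(display, crtc->pipe); */
		intel_dmc_disable_pipe(new_crtc_state);
	}
}

Passing the crtc_state lets intel_dmc_enable_pipe() consult can_enable_pipedmc() (e.g. the TGL PSR+DC5/6 restriction) instead of leaving that decision to each caller.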