Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1717
1 file changed, 599 insertions(+), 1118 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cc484b56eeaa..0e93ec201fe3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -49,11 +49,6 @@
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
-static bool is_mmio_work(struct intel_flip_work *work)
-{
- return work->mmio_work.func;
-}
-
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
DRM_FORMAT_C8,
@@ -72,6 +67,12 @@ static const uint32_t i965_primary_formats[] = {
DRM_FORMAT_XBGR2101010,
};
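+/*
+ * Modifier lists advertised to userspace; each array is terminated
+ * with DRM_FORMAT_MOD_INVALID.
+ */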
+static const uint64_t i9xx_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const uint32_t skl_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -87,11 +88,34 @@ static const uint32_t skl_primary_formats[] = {
DRM_FORMAT_VYUY,
};
+static const uint64_t skl_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const uint64_t skl_format_modifiers_ccs[] = {
+ I915_FORMAT_MOD_Yf_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
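+/* The cursor plane supports linear, uncompressed buffers only */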
+static const uint64_t cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -1777,7 +1801,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
- assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+ assert_fdi_rx_enabled(dev_priv, PIPE_A);
/* Workaround: set timing override bit. */
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
@@ -1853,16 +1877,16 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
-enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
WARN_ON(!crtc->config->has_pch_encoder);
if (HAS_PCH_LPT(dev_priv))
- return TRANSCODER_A;
+ return PIPE_A;
else
- return (enum transcoder) crtc->pipe;
+ return crtc->pipe;
}
/**
@@ -1901,7 +1925,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv,
- (enum pipe) intel_crtc_pch_transcoder(crtc));
+ intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
@@ -1999,11 +2023,19 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
return 128;
else
return 512;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ if (plane == 1)
+ return 128;
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED:
if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (plane == 1)
+ return 128;
+ /* fall through */
case I915_FORMAT_MOD_Yf_TILED:
switch (cpp) {
case 1:
@@ -2110,7 +2142,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
+ if (plane == 1)
return 4096;
switch (fb->modifier) {
@@ -2120,6 +2152,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
return 0;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
@@ -2162,6 +2196,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
*/
intel_runtime_pm_get(dev_priv);
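+ /* Signal to the reset path that a framebuffer pin is in flight */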
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
if (IS_ERR(vma))
goto err;
@@ -2189,6 +2225,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
i915_vma_get(vma);
err:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
intel_runtime_pm_put(dev_priv);
return vma;
}
@@ -2427,12 +2465,48 @@ static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
case I915_FORMAT_MOD_X_TILED:
return I915_TILING_X;
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
return I915_TILING_Y;
default:
return I915_TILING_NONE;
}
}
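+/*
+ * Format info for the CCS (render compression) modifiers: plane 0 is
+ * the main surface and plane 1 the color control surface. With
+ * .cpp = { 4, 1 } and hsub 8 / vsub 16, one CCS byte covers an 8x16
+ * pixel block of the main surface.
+ */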
+static const struct drm_format_info ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+};
+
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+ int num_formats, u32 format)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
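+/*
+ * For CCS modifiers, report the two-plane format info above so the DRM
+ * core checks pitches and offsets for the AUX plane too; returning NULL
+ * means the standard single-plane format info is used.
+ */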
+static const struct drm_format_info *
+intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+{
+ switch (cmd->modifier[0]) {
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return lookup_format_info(ccs_formats,
+ ARRAY_SIZE(ccs_formats),
+ cmd->pixel_format);
+ default:
+ return NULL;
+ }
+}
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
@@ -2456,6 +2530,36 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
intel_fb_offset_to_xy(&x, &y, fb, i);
+ if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
+ int hsub = fb->format->hsub;
+ int vsub = fb->format->vsub;
+ int tile_width, tile_height;
+ int main_x, main_y;
+ int ccs_x, ccs_y;
+
+ intel_tile_dims(fb, i, &tile_width, &tile_height);
+
+ ccs_x = (x * hsub) % (tile_width * hsub);
+ ccs_y = (y * vsub) % (tile_height * vsub);
+ main_x = intel_fb->normal[0].x % (tile_width * hsub);
+ main_y = intel_fb->normal[0].y % (tile_height * vsub);
+
+ /*
+ * CCS doesn't have its own x/y offset register, so the intra CCS tile
+ * x/y offsets must match between CCS and the main surface.
+ */
+ if (main_x != ccs_x || main_y != ccs_y) {
+ DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y,
+ ccs_x, ccs_y,
+ intel_fb->normal[0].x,
+ intel_fb->normal[0].y,
+ x, y);
+ return -EINVAL;
+ }
+ }
+
/*
* The fence (if used) is aligned to the start of the object
* so having the framebuffer wrap around across the edge of the
@@ -2664,20 +2768,6 @@ out_unref_obj:
return false;
}
-/* Update plane->state->fb to match plane->fb after driver-internal updates */
-static void
-update_state_fb(struct drm_plane *plane)
-{
- if (plane->fb == plane->state->fb)
- return;
-
- if (plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
- plane->state->fb = plane->fb;
- if (plane->state->fb)
- drm_framebuffer_reference(plane->state->fb);
-}
-
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -2830,6 +2920,9 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
break;
}
break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
switch (cpp) {
@@ -2852,6 +2945,44 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
return 2048;
}
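+/*
+ * The CCS AUX surface has no x/y offset registers of its own, so its
+ * intra-tile x/y must match the main surface. Walk the AUX offset
+ * backwards, one alignment step at a time, until the coordinates line
+ * up (or we run out of offset to give back).
+ */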
+static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int hsub = fb->format->hsub;
+ int vsub = fb->format->vsub;
+ int aux_x = plane_state->aux.x;
+ int aux_y = plane_state->aux.y;
+ u32 aux_offset = plane_state->aux.offset;
+ u32 alignment = intel_surf_alignment(fb, 1);
+
+ while (aux_offset >= main_offset && aux_y <= main_y) {
+ int x, y;
+
+ if (aux_x == main_x && aux_y == main_y)
+ break;
+
+ if (aux_offset == 0)
+ break;
+
+ x = aux_x / hsub;
+ y = aux_y / vsub;
+ aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
+ aux_offset, aux_offset - alignment);
+ aux_x = x * hsub + aux_x % hsub;
+ aux_y = y * vsub + aux_y % vsub;
+ }
+
+ if (aux_x != main_x || aux_y != main_y)
+ return false;
+
+ plane_state->aux.offset = aux_offset;
+ plane_state->aux.x = aux_x;
+ plane_state->aux.y = aux_y;
+
+ return true;
+}
+
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -2894,7 +3025,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
while ((x + w) * cpp > fb->pitches[0]) {
if (offset == 0) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
@@ -2903,6 +3034,26 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
}
}
+ /*
+ * CCS AUX surface doesn't have its own x/y offsets, so we must make sure
+ * they match with the main surface x/y offsets.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->aux.x || y != plane_state->aux.y) {
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
plane_state->main.offset = offset;
plane_state->main.x = x;
plane_state->main.y = y;
@@ -2939,6 +3090,49 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
return 0;
}
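+/*
+ * Compute the AUX surface offset/x/y for a CCS framebuffer and reject
+ * configurations the hardware cannot handle: render compression is
+ * limited to planes 1 and 2, is not available on pipe C, and only
+ * works with 0/180 degree rotation.
+ */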
+static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_x = plane_state->base.src.x1 >> 16;
+ int src_y = plane_state->base.src.y1 >> 16;
+ int hsub = fb->format->hsub;
+ int vsub = fb->format->vsub;
+ int x = src_x / hsub;
+ int y = src_y / vsub;
+ u32 offset;
+
+ switch (plane->id) {
+ case PLANE_PRIMARY:
+ case PLANE_SPRITE0:
+ break;
+ default:
+ DRM_DEBUG_KMS("RC support only on plane 1 and 2\n");
+ return -EINVAL;
+ }
+
+ if (crtc->pipe == PIPE_C) {
+ DRM_DEBUG_KMS("No RC support on pipe C\n");
+ return -EINVAL;
+ }
+
+ if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
+ DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
+ plane_state->base.rotation);
+ return -EINVAL;
+ }
+
+ intel_add_fb_offsets(&x, &y, plane_state, 1);
+ offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
+
+ plane_state->aux.offset = offset;
+ plane_state->aux.x = x * hsub + src_x % hsub;
+ plane_state->aux.y = y * vsub + src_y % vsub;
+
+ return 0;
+}
+
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -2962,6 +3156,11 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
+ } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ ret = skl_check_ccs_aux_surface(plane_state);
+ if (ret)
+ return ret;
} else {
plane_state->aux.offset = ~0xfff;
plane_state->aux.x = 0;
@@ -3268,8 +3467,12 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return PLANE_CTL_TILED_X;
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
default:
MISSING_CASE(fb_modifier);
}
@@ -3311,7 +3514,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
- if (!IS_GEMINILAKE(dev_priv)) {
+ if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3342,6 +3545,7 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
u32 plane_ctl = plane_state->ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
+ u32 aux_stride = skl_plane_stride(fb, 1, rotation);
u32 surf_addr = plane_state->main.offset;
int scaler_id = plane_state->scaler_id;
int src_x = plane_state->main.x;
@@ -3367,7 +3571,7 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE |
@@ -3378,6 +3582,10 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
+ (plane_state->aux.offset - surf_addr) | aux_stride);
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->aux.y << 16) | plane_state->aux.x);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
@@ -3419,14 +3627,6 @@ static void skylake_disable_primary_plane(struct intel_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- intel_finish_page_flip_cs(dev_priv, crtc->pipe);
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -3485,12 +3685,14 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
!gpu_reset_clobbers_display(dev_priv))
return;
- /* We have a modeset vs reset deadlock, defensively unbreak it.
- *
- * FIXME: We can do a _lot_ better, this is just a first iteration.
- */
- i915_gem_set_wedged(dev_priv);
- DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n");
+ /* We have a modeset vs reset deadlock, defensively unbreak it. */
+ set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+ wake_up_all(&dev_priv->gpu_error.wait_queue);
+
+ if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
+ DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
+ i915_gem_set_wedged(dev_priv);
+ }
/*
* Need mode_config.mutex so that we don't
@@ -3542,13 +3744,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
if (!state)
goto unlock;
- /*
- * Flips in the rings will be nuked by the reset,
- * so complete all pending flips so that user space
- * will get its events and not get stuck.
- */
- intel_complete_page_flips(dev_priv);
-
dev_priv->modeset_restore_state = NULL;
/* reset doesn't touch the display */
@@ -3585,35 +3780,8 @@ unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
-}
-
-static bool abort_flip_on_reset(struct intel_crtc *crtc)
-{
- struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
-
- if (i915_reset_backoff(error))
- return true;
-
- if (crtc->reset_count != i915_reset_count(error))
- return true;
- return false;
-}
-
-static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool pending;
-
- if (abort_flip_on_reset(intel_crtc))
- return false;
-
- spin_lock_irq(&dev->event_lock);
- pending = to_intel_crtc(crtc)->flip_work != NULL;
- spin_unlock_irq(&dev->event_lock);
-
- return pending;
+ clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
static void intel_update_pipe_config(struct intel_crtc *crtc,
@@ -4170,21 +4338,22 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
- struct intel_crtc *crtc;
-
- /* Note that we don't need to be called with mode_config.lock here
- * as our list of CRTC objects is static for the lifetime of the
- * device and so cannot disappear as we iterate. Similarly, we can
- * happily treat the predicates as racy, atomic checks as userspace
- * cannot claim and pin a new fb without at least acquiring the
- * struct_mutex and so serialising with us.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- if (atomic_read(&crtc->unpin_work_count) == 0)
+ struct drm_crtc *crtc;
+ bool cleanup_done;
+
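+ /*
+ * A commit whose cleanup_done has not completed yet may still hold
+ * its old framebuffers pinned, so give it a vblank to finish.
+ */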
+ drm_for_each_crtc(crtc, &dev_priv->drm) {
+ struct drm_crtc_commit *commit;
+
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ cleanup_done = commit ?
+ try_wait_for_completion(&commit->cleanup_done) : true;
+ spin_unlock(&crtc->commit_lock);
+
+ if (cleanup_done)
continue;
- if (crtc->flip_work)
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ drm_crtc_wait_one_vblank(crtc);
return true;
}
@@ -4192,57 +4361,6 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
return false;
}
-static void page_flip_completed(struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_flip_work *work = intel_crtc->flip_work;
-
- intel_crtc->flip_work = NULL;
-
- if (work->event)
- drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
-
- drm_crtc_vblank_put(&intel_crtc->base);
-
- wake_up_all(&dev_priv->pending_flip_queue);
- trace_i915_flip_complete(intel_crtc->plane,
- work->pending_flip_obj);
-
- queue_work(dev_priv->wq, &work->unpin_work);
-}
-
-static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- long ret;
-
- WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
-
- ret = wait_event_interruptible_timeout(
- dev_priv->pending_flip_queue,
- !intel_crtc_has_pending_flip(crtc),
- 60*HZ);
-
- if (ret < 0)
- return ret;
-
- if (ret == 0) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_flip_work *work;
-
- spin_lock_irq(&dev->event_lock);
- work = intel_crtc->flip_work;
- if (work && !is_mmio_work(work)) {
- WARN_ONCE(1, "Removing stuck page flip\n");
- page_flip_completed(intel_crtc);
- }
- spin_unlock_irq(&dev->event_lock);
- }
-
- return 0;
-}
-
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
@@ -4562,7 +4680,7 @@ static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
+ assert_pch_transcoder_disabled(dev_priv, PIPE_A);
lpt_program_iclkip(crtc);
@@ -4595,6 +4713,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int need_scaling;
/*
@@ -4604,6 +4725,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
need_scaling = src_w != dst_w || src_h != dst_h;
+ if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
+ need_scaling = true;
+
+ /*
+ * Scaling/fitting not supported in IF-ID mode in GEN9+
+ * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
+ * Once NV12 is enabled, handle it here while allocating scaler
+ * for NV12.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+ return -EINVAL;
+ }
+
/*
* if plane is being disabled or scaler is no more required or force detach
* - free scaler binded to this plane/crtc
@@ -5315,8 +5451,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
return;
if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
@@ -5401,8 +5536,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
/* If we change the relative order between pipe/planes enabling, we need
@@ -5499,8 +5633,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5528,8 +5661,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (old_crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5838,8 +5970,6 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
return;
if (crtc->primary->state->visible) {
- WARN_ON(intel_crtc->flip_work);
-
intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
@@ -6248,6 +6378,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
+ if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
+ /*
+ * There is only one pipe CSC unit per pipe, and we need that
+ * for output conversion from RGB->YCBCR. So if CTM is already
+ * applied we can't support YCBCR420 output.
+ */
+ DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
/*
* Pipe horizontal size must be even in:
* - DVO ganged mode
@@ -8041,6 +8181,7 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *config = intel_crtc->config;
if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
u32 val = 0;
@@ -8066,6 +8207,12 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+ if (config->ycbcr420) {
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
+ PIPEMISC_YUV420_ENABLE |
+ PIPEMISC_YUV420_MODE_FULL_BLEND;
+ }
+
I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
}
}
@@ -8393,10 +8540,16 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- fb->modifier = I915_FORMAT_MOD_Y_TILED;
+ if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
- fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+ if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
break;
default:
MISSING_CASE(tiling);
@@ -8630,7 +8783,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
+ "Display power well on\n");
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
@@ -9100,12 +9254,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
u64 power_domain_mask;
bool active;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_crtc_init_scalers(crtc, pipe_config);
-
- pipe_config->scaler_state.scaler_id = -1;
- pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
- }
+ intel_crtc_init_scalers(crtc, pipe_config);
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
@@ -9135,6 +9284,23 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+ if (IS_BROADWELL(dev_priv) || dev_priv->info.gen >= 9) {
+ u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+ bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+ if (IS_GEMINILAKE(dev_priv) || dev_priv->info.gen >= 10) {
+ bool blend_mode_420 = tmp &
+ PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+ pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
+ if (pipe_config->ycbcr420 != clrspace_yuv ||
+ pipe_config->ycbcr420 != blend_mode_420)
+ DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
+ } else if (clrspace_yuv) {
+ DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
+ }
+ }
+
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT_ULL(power_domain);
@@ -10112,849 +10278,11 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct intel_flip_work *work;
-
- spin_lock_irq(&dev->event_lock);
- work = intel_crtc->flip_work;
- intel_crtc->flip_work = NULL;
- spin_unlock_irq(&dev->event_lock);
-
- if (work) {
- cancel_work_sync(&work->mmio_work);
- cancel_work_sync(&work->unpin_work);
- kfree(work);
- }
drm_crtc_cleanup(crtc);
-
kfree(intel_crtc);
}
-static void intel_unpin_work_fn(struct work_struct *__work)
-{
- struct intel_flip_work *work =
- container_of(__work, struct intel_flip_work, unpin_work);
- struct intel_crtc *crtc = to_intel_crtc(work->crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_plane *primary = crtc->base.primary;
-
- if (is_mmio_work(work))
- flush_work(&work->mmio_work);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_vma(work->old_vma);
- i915_gem_object_put(work->pending_flip_obj);
- mutex_unlock(&dev->struct_mutex);
-
- i915_gem_request_put(work->flip_queued_req);
-
- intel_frontbuffer_flip_complete(to_i915(dev),
- to_intel_plane(primary)->frontbuffer_bit);
- intel_fbc_post_update(crtc);
- drm_framebuffer_unreference(work->old_fb);
-
- BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
- atomic_dec(&crtc->unpin_work_count);
-
- kfree(work);
-}
-
-/* Is 'a' after or equal to 'b'? */
-static bool g4x_flip_count_after_eq(u32 a, u32 b)
-{
- return !((a - b) & 0x80000000);
-}
-
-static bool __pageflip_finished_cs(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (abort_flip_on_reset(crtc))
- return true;
-
- /*
- * The relevant registers don't exist on pre-ctg.
- * As the flip done interrupt doesn't trigger for mmio
- * flips on gmch platforms, a flip count check isn't
- * really needed there. But since ctg has the registers,
- * include it in the check anyway.
- */
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- return true;
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
-
- /*
- * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
- * used the same base address. In that case the mmio flip might
- * have completed, but the CS hasn't even executed the flip yet.
- *
- * A flip count check isn't enough as the CS might have updated
- * the base address just after start of vblank, but before we
- * managed to process the interrupt. This means we'd complete the
- * CS flip too soon.
- *
- * Combining both checks should get us a good enough result. It may
- * still happen that the CS flip has been executed, but has not
- * yet actually completed. But in case the base address is the same
- * anyway, we don't really care.
- */
- return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
- crtc->flip_work->gtt_offset &&
- g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
- crtc->flip_work->flip_count);
-}
-
-static bool
-__pageflip_finished_mmio(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- /*
- * MMIO work completes when vblank is different from
- * flip_queued_vblank.
- *
- * Reset counter value doesn't matter, this is handled by
- * i915_wait_request finishing early, so no need to handle
- * reset here.
- */
- return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
-}
-
-
-static bool pageflip_finished(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- if (!atomic_read(&work->pending))
- return false;
-
- smp_rmb();
-
- if (is_mmio_work(work))
- return __pageflip_finished_mmio(crtc, work);
- else
- return __pageflip_finished_cs(crtc, work);
-}
-
-void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_flip_work *work;
- unsigned long flags;
-
- /* Ignore early vblank irqs */
- if (!crtc)
- return;
-
- /*
- * This is called both by irq handlers and the reset code (to complete
- * lost pageflips) so needs the full irqsave spinlocks.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- work = crtc->flip_work;
-
- if (work != NULL &&
- !is_mmio_work(work) &&
- pageflip_finished(crtc, work))
- page_flip_completed(crtc);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_flip_work *work;
- unsigned long flags;
-
- /* Ignore early vblank irqs */
- if (!crtc)
- return;
-
- /*
- * This is called both by irq handlers and the reset code (to complete
- * lost pageflips) so needs the full irqsave spinlocks.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- work = crtc->flip_work;
-
- if (work != NULL &&
- is_mmio_work(work) &&
- pageflip_finished(crtc, work))
- page_flip_completed(crtc);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
-
- /* Ensure that the work item is consistent when activating it ... */
- smp_mb__before_atomic();
- atomic_set(&work->pending, 1);
-}
-
-static int intel_gen2_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask, *cs;
-
- cs = intel_ring_begin(req, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Can't queue multiple flips, so wait for the previous
- * one to finish before executing the next.
- */
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
- *cs++ = MI_NOOP;
- *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0];
- *cs++ = intel_crtc->flip_work->gtt_offset;
- *cs++ = 0; /* aux display base address, unused */
-
- return 0;
-}
-
-static int intel_gen3_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask, *cs;
-
- cs = intel_ring_begin(req, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
- *cs++ = MI_NOOP;
- *cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0];
- *cs++ = intel_crtc->flip_work->gtt_offset;
- *cs++ = MI_NOOP;
-
- return 0;
-}
-
-static int intel_gen4_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 pf, pipesrc, *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0];
- *cs++ = intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier);
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- *cs++ = pf | pipesrc;
-
- return 0;
-}
-
-static int intel_gen6_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 pf, pipesrc, *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
- *cs++ = intel_crtc->flip_work->gtt_offset;
-
- /* Contrary to the suggestions in the documentation,
- * "Enable Panel Fitter" does not seem to be required when page
- * flipping with a non-native mode, and worse causes a normal
- * modeset to fail.
- * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- *cs++ = pf | pipesrc;
-
- return 0;
-}
-
-static int intel_gen7_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 *cs, plane_bit = 0;
- int len, ret;
-
- switch (intel_crtc->plane) {
- case PLANE_A:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
- break;
- case PLANE_B:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
- break;
- case PLANE_C:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
- break;
- default:
- WARN_ONCE(1, "unknown plane in flip command\n");
- return -ENODEV;
- }
-
- len = 4;
- if (req->engine->id == RCS) {
- len += 6;
- /*
- * On Gen 8, SRM is now taking an extra dword to accommodate
- * 48bits addresses, and we need a NOOP for the batch size to
- * stay even.
- */
- if (IS_GEN8(dev_priv))
- len += 2;
- }
-
- /*
- * BSpec MI_DISPLAY_FLIP for IVB:
- * "The full packet must be contained within the same cache line."
- *
- * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
- * cacheline, if we ever start emitting more commands before
- * the MI_DISPLAY_FLIP we may need to first emit everything else,
- * then do the cacheline alignment, and finally emit the
- * MI_DISPLAY_FLIP.
- */
- ret = intel_ring_cacheline_align(req);
- if (ret)
- return ret;
-
- cs = intel_ring_begin(req, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Unmask the flip-done completion message. Note that the bspec says that
- * we should do this for both the BCS and RCS, and that we must not unmask
- * more than one flip event at any time (or ensure that one flip message
- * can be sent by waiting for flip-done prior to queueing new flips).
- * Experimentation says that BCS works despite DERRMR masking all
- * flip-done completion events and that unmasking all planes at once
- * for the RCS also doesn't appear to drop events. Setting the DERRMR
- * to zero does lead to lockups within MI_DISPLAY_FLIP.
- */
- if (req->engine->id == RCS) {
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(DERRMR);
- *cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE);
- if (IS_GEN8(dev_priv))
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
- MI_SRM_LRM_GLOBAL_GTT;
- else
- *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(DERRMR);
- *cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
- if (IS_GEN8(dev_priv)) {
- *cs++ = 0;
- *cs++ = MI_NOOP;
- }
- }
-
- *cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
- *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
- *cs++ = intel_crtc->flip_work->gtt_offset;
- *cs++ = MI_NOOP;
-
- return 0;
-}
-
-static bool use_mmio_flip(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *obj)
-{
- /*
- * This is not being used for older platforms, because
- * non-availability of flip done interrupt forces us to use
- * CS flips. Older platforms derive flip done using some clever
- * tricks involving the flip_pending status bits and vblank irqs.
- * So using MMIO flips there would disrupt this mechanism.
- */
-
- if (engine == NULL)
- return true;
-
- if (INTEL_GEN(engine->i915) < 5)
- return false;
-
- if (i915.use_mmio_flip < 0)
- return false;
- else if (i915.use_mmio_flip > 0)
- return true;
- else if (i915.enable_execlists)
- return true;
-
- return engine != i915_gem_object_last_write_engine(obj);
-}
-
-static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
- unsigned int rotation,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
- const enum pipe pipe = intel_crtc->pipe;
- u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
-
- ctl = I915_READ(PLANE_CTL(pipe, 0));
- ctl &= ~PLANE_CTL_TILED_MASK;
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- break;
- case I915_FORMAT_MOD_X_TILED:
- ctl |= PLANE_CTL_TILED_X;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- ctl |= PLANE_CTL_TILED_Y;
- break;
- case I915_FORMAT_MOD_Yf_TILED:
- ctl |= PLANE_CTL_TILED_YF;
- break;
- default:
- MISSING_CASE(fb->modifier);
- }
-
- /*
- * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
- * PLANE_SURF updates, the update is then guaranteed to be atomic.
- */
- I915_WRITE(PLANE_CTL(pipe, 0), ctl);
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
-
- I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
- POSTING_READ(PLANE_SURF(pipe, 0));
-}
-
-static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
- i915_reg_t reg = DSPCNTR(intel_crtc->plane);
- u32 dspcntr;
-
- dspcntr = I915_READ(reg);
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
-
- I915_WRITE(reg, dspcntr);
-
- I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
- POSTING_READ(DSPSURF(intel_crtc->plane));
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *w)
-{
- struct intel_flip_work *work =
- container_of(w, struct intel_flip_work, mmio_work);
- struct intel_crtc *crtc = to_intel_crtc(work->crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_framebuffer *intel_fb =
- to_intel_framebuffer(crtc->base.primary->fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
-
- WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
-
- intel_pipe_update_start(crtc);
-
- if (INTEL_GEN(dev_priv) >= 9)
- skl_do_mmio_flip(crtc, work->rotation, work);
- else
- /* use_mmio_flip() restricts MMIO flips to ilk+ */
- ilk_do_mmio_flip(crtc, work);
-
- intel_pipe_update_end(crtc, work);
-}
-
-static int intel_default_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- return -ENODEV;
-}
-
-static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_flip_work *work)
-{
- u32 addr, vblank;
-
- if (!atomic_read(&work->pending))
- return false;
-
- smp_rmb();
-
- vblank = intel_crtc_get_vblank_counter(intel_crtc);
- if (work->flip_ready_vblank == 0) {
- if (work->flip_queued_req &&
- !i915_gem_request_completed(work->flip_queued_req))
- return false;
-
- work->flip_ready_vblank = vblank;
- }
-
- if (vblank - work->flip_ready_vblank < 3)
- return false;
-
- /* Potential stall - if we see that the flip has happened,
- * assume a missed interrupt. */
- if (INTEL_GEN(dev_priv) >= 4)
- addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
- else
- addr = I915_READ(DSPADDR(intel_crtc->plane));
-
- /* There is a potential issue here with a false positive after a flip
- * to the same address. We could address this by checking for a
- * non-incrementing frame counter.
- */
- return addr == work->gtt_offset;
-}
-
-void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_flip_work *work;
-
- WARN_ON(!in_interrupt());
-
- if (crtc == NULL)
- return;
-
- spin_lock(&dev->event_lock);
- work = crtc->flip_work;
-
- if (work != NULL && !is_mmio_work(work) &&
- __pageflip_stall_check_cs(dev_priv, crtc, work)) {
- WARN_ONCE(1,
- "Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
- page_flip_completed(crtc);
- work = NULL;
- }
-
- if (work != NULL && !is_mmio_work(work) &&
- intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
- intel_queue_rps_boost_for_request(work->flip_queued_req);
- spin_unlock(&dev->event_lock);
-}
-
-__maybe_unused
-static int intel_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *primary = crtc->primary;
- enum pipe pipe = intel_crtc->pipe;
- struct intel_flip_work *work;
- struct intel_engine_cs *engine;
- bool mmio_flip;
- struct drm_i915_gem_request *request;
- struct i915_vma *vma;
- int ret;
-
- /*
- * drm_mode_page_flip_ioctl() should already catch this, but double
- * check to be safe. In the future we may enable pageflipping from
- * a disabled primary plane.
- */
- if (WARN_ON(intel_fb_obj(old_fb) == NULL))
- return -EBUSY;
-
- /* Can't change pixel format via MI display flips. */
- if (fb->format != crtc->primary->fb->format)
- return -EINVAL;
-
- /*
- * TILEOFF/LINOFF registers can't be changed via MI display flips.
- * Note that pitch changes could also affect these registers.
- */
- if (INTEL_GEN(dev_priv) > 3 &&
- (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
- fb->pitches[0] != crtc->primary->fb->pitches[0]))
- return -EINVAL;
-
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- goto out_hang;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- work->event = event;
- work->crtc = crtc;
- work->old_fb = old_fb;
- INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
-
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto free_work;
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irq(&dev->event_lock);
- if (intel_crtc->flip_work) {
- /* Before declaring the flip queue wedged, check if
- * the hardware completed the operation behind our backs.
- */
- if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
- DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
- page_flip_completed(intel_crtc);
- } else {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irq(&dev->event_lock);
-
- drm_crtc_vblank_put(crtc);
- kfree(work);
- return -EBUSY;
- }
- }
- intel_crtc->flip_work = work;
- spin_unlock_irq(&dev->event_lock);
-
- if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
- flush_workqueue(dev_priv->wq);
-
- /* Reference the objects for the scheduled work. */
- drm_framebuffer_reference(work->old_fb);
-
- crtc->primary->fb = fb;
- update_state_fb(crtc->primary);
-
- work->pending_flip_obj = i915_gem_object_get(obj);
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto cleanup;
-
- intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
- if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
- ret = -EIO;
- goto unlock;
- }
-
- atomic_inc(&intel_crtc->unpin_work_count);
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- engine = dev_priv->engine[BCS];
- if (fb->modifier != old_fb->modifier)
- /* vlv: DISPLAY_FLIP fails to change tiling */
- engine = NULL;
- } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
- engine = dev_priv->engine[BCS];
- } else if (INTEL_GEN(dev_priv) >= 7) {
- engine = i915_gem_object_last_write_engine(obj);
- if (engine == NULL || engine->id != RCS)
- engine = dev_priv->engine[BCS];
- } else {
- engine = dev_priv->engine[RCS];
- }
-
- mmio_flip = use_mmio_flip(engine, obj);
-
- vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto cleanup_pending;
- }
-
- work->old_vma = to_intel_plane_state(primary->state)->vma;
- to_intel_plane_state(primary->state)->vma = vma;
-
- work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
- work->rotation = crtc->primary->state->rotation;
-
- /*
- * There's the potential that the next frame will not be compatible with
- * FBC, so we want to call pre_update() before the actual page flip.
- * The problem is that pre_update() caches some information about the fb
- * object, so we want to do this only after the object is pinned. Let's
- * be on the safe side and do this immediately before scheduling the
- * flip.
- */
- intel_fbc_pre_update(intel_crtc, intel_crtc->config,
- to_intel_plane_state(primary->state));
-
- if (mmio_flip) {
- INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
- queue_work(system_unbound_wq, &work->mmio_work);
- } else {
- request = i915_gem_request_alloc(engine,
- dev_priv->kernel_context);
- if (IS_ERR(request)) {
- ret = PTR_ERR(request);
- goto cleanup_unpin;
- }
-
- ret = i915_gem_request_await_object(request, obj, false);
- if (ret)
- goto cleanup_request;
-
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
- page_flip_flags);
- if (ret)
- goto cleanup_request;
-
- intel_mark_page_flip_active(intel_crtc, work);
-
- work->flip_queued_req = i915_gem_request_get(request);
- i915_add_request(request);
- }
-
- i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
- i915_gem_track_fb(intel_fb_obj(old_fb), obj,
- to_intel_plane(primary)->frontbuffer_bit);
- mutex_unlock(&dev->struct_mutex);
-
- intel_frontbuffer_flip_prepare(to_i915(dev),
- to_intel_plane(primary)->frontbuffer_bit);
-
- trace_i915_flip_request(intel_crtc->plane, obj);
-
- return 0;
-
-cleanup_request:
- i915_add_request(request);
-cleanup_unpin:
- to_intel_plane_state(primary->state)->vma = work->old_vma;
- intel_unpin_fb_vma(vma);
-cleanup_pending:
- atomic_dec(&intel_crtc->unpin_work_count);
-unlock:
- mutex_unlock(&dev->struct_mutex);
-cleanup:
- crtc->primary->fb = old_fb;
- update_state_fb(crtc->primary);
-
- i915_gem_object_put(obj);
- drm_framebuffer_unreference(work->old_fb);
-
- spin_lock_irq(&dev->event_lock);
- intel_crtc->flip_work = NULL;
- spin_unlock_irq(&dev->event_lock);
-
- drm_crtc_vblank_put(crtc);
-free_work:
- kfree(work);
-
- if (ret == -EIO) {
- struct drm_atomic_state *state;
- struct drm_plane_state *plane_state;
-
-out_hang:
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
- state->acquire_ctx = dev->mode_config.acquire_ctx;
-
-retry:
- plane_state = drm_atomic_get_plane_state(state, primary);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret) {
- drm_atomic_set_fb_for_plane(plane_state, fb);
-
- ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
- if (!ret)
- ret = drm_atomic_commit(state);
- }
-
- if (ret == -EDEADLK) {
- drm_modeset_backoff(state->acquire_ctx);
- drm_atomic_state_clear(state);
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
- if (ret == 0 && event) {
- spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&dev->event_lock);
- }
- }
- return ret;
-}
-
-
/**
* intel_wm_need_update - Check whether watermarks need updating
* @plane: drm plane
@@ -11352,6 +10680,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
+ if (pipe_config->ycbcr420)
+ DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
+
if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11923,6 +11254,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(hdmi_scrambling);
PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_I(has_infoframe);
+ PIPE_CONF_CHECK_I(ycbcr420);
PIPE_CONF_CHECK_I(has_audio);
@@ -12764,31 +12096,7 @@ static int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i, ret;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- if (state->legacy_cursor_update)
- continue;
-
- ret = intel_crtc_wait_for_pending_flips(crtc);
- if (ret)
- return ret;
-
- if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
- flush_workqueue(dev_priv->wq);
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return drm_atomic_helper_prepare_planes(dev, state);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@ -12796,7 +12104,7 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
if (!dev->max_vblank_count)
- return drm_accurate_vblank_count(&crtc->base);
+ return drm_crtc_accurate_vblank_count(&crtc->base);
return dev->driver->get_vblank_counter(dev, crtc->pipe);
}
@@ -12999,6 +12307,30 @@ static void intel_atomic_helper_free_state_worker(struct work_struct *work)
intel_atomic_helper_free_state(dev_priv);
}
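+/*
+ * Wait for the commit fence to signal, but also wake up if a GPU reset
+ * starts (I915_RESET_MODESET): this is the other half of the
+ * modeset-vs-reset deadlock avoidance in intel_prepare_reset().
+ */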
+static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
+{
+ struct wait_queue_entry wait_fence, wait_reset;
+ struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+
+ init_wait_entry(&wait_fence, 0);
+ init_wait_entry(&wait_reset, 0);
+ for (;;) {
+ prepare_to_wait(&intel_state->commit_ready.wait,
+ &wait_fence, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&dev_priv->gpu_error.wait_queue,
+ &wait_reset, TASK_UNINTERRUPTIBLE);
+
+
+ if (i915_sw_fence_done(&intel_state->commit_ready) ||
+ test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ break;
+
+ schedule();
+ }
+ finish_wait(&intel_state->commit_ready.wait, &wait_fence);
+ finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
+}
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -13012,6 +12344,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
unsigned crtc_vblank_mask = 0;
int i;
+ intel_atomic_commit_fence_wait(intel_state);
+
drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset)
@@ -13151,9 +12485,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
- mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
drm_atomic_helper_commit_cleanup_done(state);
@@ -13179,10 +12511,8 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
switch (notify) {
case FENCE_COMPLETE:
- if (state->base.commit_work.func)
- queue_work(system_unbound_wq, &state->base.commit_work);
+ /* we do blocking waits in the worker, nothing to do here */
break;
-
case FENCE_FREE:
{
struct intel_atomic_helper *helper =
@@ -13264,7 +12594,13 @@ static int intel_atomic_commit(struct drm_device *dev,
if (INTEL_GEN(dev_priv) < 9)
state->legacy_cursor_update = false;
- drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret) {
+ i915_sw_fence_commit(&intel_state->commit_ready);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return ret;
+ }
dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -13278,14 +12614,14 @@ static int intel_atomic_commit(struct drm_device *dev,
}
drm_atomic_state_get(state);
- INIT_WORK(&state->commit_work,
- nonblock ? intel_atomic_commit_work : NULL);
+ INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (!nonblock) {
- i915_sw_fence_wait(&intel_state->commit_ready);
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
intel_atomic_commit_tail(state);
- }
+
return 0;
}
@@ -13293,7 +12629,6 @@ static int intel_atomic_commit(struct drm_device *dev,
static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
- .set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13327,32 +12662,6 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
- if (obj) {
- if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
- const int align = intel_cursor_alignment(dev_priv);
-
- ret = i915_gem_object_attach_phys(obj, align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- return ret;
- }
- } else {
- struct i915_vma *vma;
-
- vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma)) {
- DRM_DEBUG_KMS("failed to pin object\n");
- return PTR_ERR(vma);
- }
-
- to_intel_plane_state(new_state)->vma = vma;
- }
- }
-
- if (!obj && !old_obj)
- return 0;
-
if (old_obj) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(new_state->state,
@@ -13391,6 +12700,38 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (!obj)
return 0;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ if (ret) {
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+ }
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ const int align = intel_cursor_alignment(dev_priv);
+
+ ret = i915_gem_object_attach_phys(obj, align);
+ } else {
+ struct i915_vma *vma;
+
+ vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+ if (!IS_ERR(vma))
+ to_intel_plane_state(new_state)->vma = vma;
+ else
+ ret = PTR_ERR(vma);
+ }
+
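+ /* Raise in-flight rendering on this object to display priority */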
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+ if (ret)
+ return ret;
+
if (!new_state->fence) { /* implicit fencing */
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
obj->resv, NULL,
@@ -13398,8 +12739,6 @@ intel_prepare_plane_fb(struct drm_plane *plane,
GFP_KERNEL);
if (ret < 0)
return ret;
-
- i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
return 0;
@@ -13422,8 +12761,11 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
/* Should only be called after a successful intel_prepare_plane_fb()! */
vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
- if (vma)
+ if (vma) {
+ mutex_lock(&plane->dev->struct_mutex);
intel_unpin_fb_vma(vma);
+ mutex_unlock(&plane->dev->struct_mutex);
+ }
}
int
@@ -13550,7 +12892,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_pipe_update_end(intel_crtc, NULL);
+ intel_pipe_update_end(intel_crtc);
}
/**
@@ -13566,15 +12908,110 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-const struct drm_plane_funcs intel_plane_funcs = {
+static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB8888:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
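+/*
+ * Note the deliberate fall-throughs below: formats in the upper case
+ * blocks also accept every modifier checked in the blocks they fall
+ * through to.
+ */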
+static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_C8:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
+static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
+
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
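+ /*
+ * The modifier vendor lives in the top 8 bits; anything that is
+ * neither Intel's nor plain linear cannot be ours.
+ */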
+ if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
+ modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_mod_supported(format, modifier);
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i965_mod_supported(format, modifier);
+ else
+ return i8xx_mod_supported(format, modifier);
+}
+
+static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_primary_plane_format_mod_supported,
};
static int
@@ -13593,7 +13030,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
struct drm_crtc_state *crtc_state = crtc->state;
- struct i915_vma *old_vma;
+ struct i915_vma *old_vma, *vma;
/*
* When crtc is inactive or there is a modeset pending,
@@ -13651,8 +13088,6 @@ intel_legacy_cursor_update(struct drm_plane *plane,
goto out_unlock;
}
} else {
- struct i915_vma *vma;
-
vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
if (IS_ERR(vma)) {
DRM_DEBUG_KMS("failed to pin object\n");
@@ -13675,7 +13110,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
new_plane_state->fence = NULL;
new_plane_state->fb = old_fb;
- to_intel_plane_state(new_plane_state)->vma = old_vma;
+ to_intel_plane_state(new_plane_state)->vma = NULL;
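+ /* the old vma is unpinned by hand below, not through the copied state */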
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
@@ -13687,7 +13122,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
- intel_cleanup_plane_fb(plane, new_plane_state);
+ if (old_vma)
+ intel_unpin_fb_vma(old_vma);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -13705,11 +13141,11 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_legacy_cursor_update,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_cursor_plane_format_mod_supported,
};
static struct intel_plane *
@@ -13720,6 +13156,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
+ const uint64_t *modifiers;
int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
@@ -13755,21 +13192,34 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
+ modifiers = skl_format_modifiers_ccs;
+
+ primary->update_plane = skylake_update_primary_plane;
+ primary->disable_plane = skylake_disable_primary_plane;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ intel_primary_formats = skl_primary_formats;
+ num_formats = ARRAY_SIZE(skl_primary_formats);
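+ /* Gen9 pipe C has no CCS support, so leave the CCS modifiers out */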
+ if (pipe < PIPE_C)
+ modifiers = skl_format_modifiers_ccs;
+ else
+ modifiers = skl_format_modifiers_noccs;
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
+ modifiers = i9xx_format_modifiers;
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
+ modifiers = i9xx_format_modifiers;
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
@@ -13779,18 +13229,21 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
intel_primary_formats, num_formats,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
intel_primary_formats, num_formats,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
intel_primary_formats, num_formats,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c", plane_name(primary->plane));
if (ret)
@@ -13876,6 +13329,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
+ cursor_format_modifiers,
DRM_PLANE_TYPE_CURSOR,
"cursor %c", pipe_name(pipe));
if (ret)
@@ -14398,10 +13852,12 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_framebuffer *fb = &intel_fb->base;
struct drm_format_name_buf format_name;
- u32 pitch_limit, stride_alignment;
+ u32 pitch_limit;
unsigned int tiling, stride;
int ret = -EINVAL;
+ int i;
i915_gem_object_lock(obj);
obj->framebuffer_references++;
@@ -14430,6 +13886,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
/* Passed in modifier sanity checking. */
switch (mode_cmd->modifier[0]) {
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ default:
+ DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
+ goto err;
+ }
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
if (INTEL_GEN(dev_priv) < 9) {
@@ -14534,25 +14003,46 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->offsets[0] != 0)
goto err;
- drm_helper_mode_fill_fb_struct(&dev_priv->drm,
- &intel_fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
- stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0);
- if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
- DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n",
- mode_cmd->pitches[0], stride_alignment);
- goto err;
+ for (i = 0; i < fb->format->num_planes; i++) {
+ u32 stride_alignment;
+
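+ /* All framebuffer planes must share a single backing object */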
+ if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+ DRM_DEBUG_KMS("bad plane %d handle\n", i);
+ goto err;
+ }
+
+ stride_alignment = intel_fb_stride_alignment(fb, i);
+
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+ * plane stride to be a multiple of 4. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
+ (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
+ stride_alignment *= 4;
+
+ if (fb->pitches[i] & (stride_alignment - 1)) {
+ DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
+ goto err;
+ }
}
intel_fb->obj = obj;
- ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
+ ret = intel_fill_fb_info(dev_priv, fb);
if (ret)
goto err;
- ret = drm_framebuffer_init(obj->base.dev,
- &intel_fb->base,
- &intel_fb_funcs);
+ ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
goto err;
@@ -14600,6 +14090,7 @@ static void intel_atomic_state_free(struct drm_atomic_state *state)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
+ .get_format_info = intel_get_format_info,
.output_poll_changed = intel_fbdev_output_poll_changed,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
@@ -14699,34 +14190,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = skl_update_crtcs;
else
dev_priv->display.update_crtcs = intel_update_crtcs;
-
- switch (INTEL_INFO(dev_priv)->gen) {
- case 2:
- dev_priv->display.queue_flip = intel_gen2_queue_flip;
- break;
-
- case 3:
- dev_priv->display.queue_flip = intel_gen3_queue_flip;
- break;
-
- case 4:
- case 5:
- dev_priv->display.queue_flip = intel_gen4_queue_flip;
- break;
-
- case 6:
- dev_priv->display.queue_flip = intel_gen6_queue_flip;
- break;
- case 7:
- case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
- dev_priv->display.queue_flip = intel_gen7_queue_flip;
- break;
- case 9:
- /* Drop through - unsupported since execlist only. */
- default:
- /* Default just returns -ENODEV to indicate unsupported */
- dev_priv->display.queue_flip = intel_default_queue_flip;
- }
}
/*
@@ -14758,6 +14221,17 @@ static void quirk_backlight_present(struct drm_device *dev)
DRM_INFO("applying backlight present quirk\n");
}
+/* Toshiba Satellite P50-C-18C requires the T12 delay to be at least
+ * 800 ms, which is 300 ms more than the eDP spec T12 minimum.
+ */
+static void quirk_increase_t12_delay(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
+ DRM_INFO("Applying T12 delay quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -14841,6 +14315,9 @@ static struct intel_quirk intel_quirks[] = {
/* Dell Chromebook 11 (2015 version) */
{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -15643,7 +15120,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_wm_get_hw_state(dev);
vlv_wm_sanitize(dev_priv);
- } else if (IS_GEN9(dev_priv)) {
+ } else if (INTEL_GEN(dev_priv) >= 9) {
skl_wm_get_hw_state(dev);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev);
@@ -15750,6 +15227,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_kms_helper_poll_fini(dev);
+ /* poll work can call into fbdev, hence clean that up afterwards */
+ intel_fbdev_fini(dev_priv);
+
intel_unregister_dsm_handler();
intel_fbc_global_disable(dev_priv);
@@ -15869,7 +15349,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
+ error->power_well_driver =
+ I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =