Diffstat (limited to 'drivers')
74 files changed, 7575 insertions, 3466 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9a024f899dd4..f3334829e55a 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -153,7 +153,6 @@ static struct page *i8xx_alloc_pages(void) __free_pages(page, 2); return NULL; } - get_page(page); atomic_inc(&agp_bridge->current_memory_agp); return page; } @@ -164,7 +163,6 @@ static void i8xx_destroy_pages(struct page *page) return; set_pages_wb(page, 4); - put_page(page); __free_pages(page, 2); atomic_dec(&agp_bridge->current_memory_agp); } @@ -300,7 +298,6 @@ static int intel_gtt_setup_scratch_page(void) page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (page == NULL) return -ENOMEM; - get_page(page); set_pages_uc(page, 1); if (intel_private.needs_dmar) { @@ -560,7 +557,6 @@ static void intel_gtt_teardown_scratch_page(void) set_pages_wb(intel_private.scratch_page, 1); pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - put_page(intel_private.scratch_page); __free_page(intel_private.scratch_page); } diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 9292a761ea6d..c3cf64ce2891 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ - drm_modeset_lock.o + drm_modeset_lock.o drm_atomic.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o @@ -23,7 +23,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ - drm_plane_helper.o drm_dp_mst_topology.o + drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o diff --git a/drivers/gpu/drm/README.drm b/drivers/gpu/drm/README.drm deleted file mode 100644 index b5b332722581..000000000000 --- a/drivers/gpu/drm/README.drm +++ /dev/null @@ -1,43 +0,0 @@ -************************************************************ -* For the very latest on DRI development, please see: * -* http://dri.freedesktop.org/ * -************************************************************ - -The Direct Rendering Manager (drm) is a device-independent kernel-level -device driver that provides support for the XFree86 Direct Rendering -Infrastructure (DRI). - -The DRM supports the Direct Rendering Infrastructure (DRI) in four major -ways: - - 1. The DRM provides synchronized access to the graphics hardware via - the use of an optimized two-tiered lock. - - 2. The DRM enforces the DRI security policy for access to the graphics - hardware by only allowing authenticated X11 clients access to - restricted regions of memory. - - 3. The DRM provides a generic DMA engine, complete with multiple - queues and the ability to detect the need for an OpenGL context - switch. - - 4. The DRM is extensible via the use of small device-specific modules - that rely extensively on the API exported by the DRM module. 
- - -Documentation on the DRI is available from: - http://dri.freedesktop.org/wiki/Documentation - http://sourceforge.net/project/showfiles.php?group_id=387 - http://dri.sourceforge.net/doc/ - -For specific information about kernel-level support, see: - - The Direct Rendering Manager, Kernel Support for the Direct Rendering - Infrastructure - http://dri.sourceforge.net/doc/drm_low_level.html - - Hardware Locking for the Direct Rendering Infrastructure - http://dri.sourceforge.net/doc/hardware_locking_low_level.html - - A Security Analysis of the Direct Rendering Infrastructure - http://dri.sourceforge.net/doc/security_low_level.html diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index e4a1490b42c2..e3a7a5078e5c 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -12,6 +12,7 @@ #include <linux/platform_device.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> #include "armada_crtc.h" #include "armada_drm.h" #include "armada_fb.h" diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 9dc0fd5c1ea4..b7ee2634e47c 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -31,6 +31,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> #include "ast_drv.h" #include "ast_tables.h" diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 6b7efcf363d6..5ffd4895d040 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -6,6 +6,7 @@ */ #include "bochs.h" +#include <drm/drm_plane_helper.h> static int defx = 1024; static int defy = 768; diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index c7c5a9d91fa0..99d4a74ffeaf 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -16,6 +16,7 @@ */ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> #include <video/cirrus.h> diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c new file mode 100644 index 000000000000..ed991ba66e21 --- /dev/null +++ b/drivers/gpu/drm/drm_atomic.c @@ -0,0 +1,628 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_plane_helper.h> + +static void kfree_state(struct drm_atomic_state *state) +{ + kfree(state->connectors); + kfree(state->connector_states); + kfree(state->crtcs); + kfree(state->crtc_states); + kfree(state->planes); + kfree(state->plane_states); + kfree(state); +} + +/** + * drm_atomic_state_alloc - allocate atomic state + * @dev: DRM device + * + * This allocates an empty atomic state to track updates. + */ +struct drm_atomic_state * +drm_atomic_state_alloc(struct drm_device *dev) +{ + struct drm_atomic_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + state->crtcs = kcalloc(dev->mode_config.num_crtc, + sizeof(*state->crtcs), GFP_KERNEL); + if (!state->crtcs) + goto fail; + state->crtc_states = kcalloc(dev->mode_config.num_crtc, + sizeof(*state->crtc_states), GFP_KERNEL); + if (!state->crtc_states) + goto fail; + state->planes = kcalloc(dev->mode_config.num_total_plane, + sizeof(*state->planes), GFP_KERNEL); + if (!state->planes) + goto fail; + state->plane_states = kcalloc(dev->mode_config.num_total_plane, + sizeof(*state->plane_states), GFP_KERNEL); + if (!state->plane_states) + goto fail; + state->connectors = kcalloc(dev->mode_config.num_connector, + sizeof(*state->connectors), + GFP_KERNEL); + if (!state->connectors) + goto fail; + state->connector_states = kcalloc(dev->mode_config.num_connector, + sizeof(*state->connector_states), + GFP_KERNEL); + if (!state->connector_states) + goto fail; + + state->dev = dev; + + DRM_DEBUG_KMS("Allocate atomic state %p\n", state); + + return state; +fail: + kfree_state(state); + + return NULL; +} +EXPORT_SYMBOL(drm_atomic_state_alloc); + +/** + * drm_atomic_state_clear - clear state object + * @state: atomic state + * + * When the w/w mutex algorithm detects a deadlock we need to back off and drop + * all locks. So someone else could sneak in and change the current modeset + * configuration. Which means that all the state assembled in @state is no + * longer an atomic update to the current state, but to some arbitrary earlier + * state. Which could break assumptions the driver's ->atomic_check likely + * relies on. + * + * Hence we must clear all cached state and completely start over, using this + * function. + */ +void drm_atomic_state_clear(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + int i; + + DRM_DEBUG_KMS("Clearing atomic state %p\n", state); + + for (i = 0; i < dev->mode_config.num_connector; i++) { + struct drm_connector *connector = state->connectors[i]; + + if (!connector) + continue; + + connector->funcs->atomic_destroy_state(connector, + state->connector_states[i]); + } + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + + if (!crtc) + continue; + + crtc->funcs->atomic_destroy_state(crtc, + state->crtc_states[i]); + } + + for (i = 0; i < dev->mode_config.num_total_plane; i++) { + struct drm_plane *plane = state->planes[i]; + + if (!plane) + continue; + + plane->funcs->atomic_destroy_state(plane, + state->plane_states[i]); + } +} +EXPORT_SYMBOL(drm_atomic_state_clear); + +/** + * drm_atomic_state_free - free all memory for an atomic state + * @state: atomic state to deallocate + * + * This frees all memory associated with an atomic state, including all the + * per-object state for planes, crtcs and connectors. 
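+ *
+ * A minimal lifecycle sketch (hypothetical driver code; @ctx stands for an
+ * assumed caller-provided acquire context):
+ *
+ *	state = drm_atomic_state_alloc(dev);
+ *	if (!state)
+ *		return -ENOMEM;
+ *	state->acquire_ctx = ctx;
+ *	... build up and check the update ...
+ *	drm_atomic_state_free(state);
+ *
+ * Note that a successful drm_atomic_commit() takes ownership of the state, in
+ * which case the caller must not free it.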
+ */ +void drm_atomic_state_free(struct drm_atomic_state *state) +{ + drm_atomic_state_clear(state); + + DRM_DEBUG_KMS("Freeing atomic state %p\n", state); + + kfree_state(state); +} +EXPORT_SYMBOL(drm_atomic_state_free); + +/** + * drm_atomic_get_crtc_state - get crtc state + * @state: global atomic state object + * @crtc: crtc to get state object for + * + * This function returns the crtc state for the given crtc, allocating it if + * needed. It will also grab the relevant crtc lock to make sure that the state + * is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. + */ +struct drm_crtc_state * +drm_atomic_get_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + int ret, index; + struct drm_crtc_state *crtc_state; + + index = drm_crtc_index(crtc); + + if (state->crtc_states[index]) + return state->crtc_states[index]; + + ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + crtc_state = crtc->funcs->atomic_duplicate_state(crtc); + if (!crtc_state) + return ERR_PTR(-ENOMEM); + + state->crtc_states[index] = crtc_state; + state->crtcs[index] = crtc; + crtc_state->state = state; + + DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n", + crtc->base.id, crtc_state, state); + + return crtc_state; +} +EXPORT_SYMBOL(drm_atomic_get_crtc_state); + +/** + * drm_atomic_get_plane_state - get plane state + * @state: global atomic state object + * @plane: plane to get state object for + * + * This function returns the plane state for the given plane, allocating it if + * needed. It will also grab the relevant plane lock to make sure that the state + * is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. + */ +struct drm_plane_state * +drm_atomic_get_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + int ret, index; + struct drm_plane_state *plane_state; + + index = drm_plane_index(plane); + + if (state->plane_states[index]) + return state->plane_states[index]; + + /* + * TODO: We currently don't have per-plane mutexes. So instead of trying + * crazy tricks with deferring plane->crtc and hoping for the best just + * grab all crtc locks. Once we have per-plane locks we must update this + * to only take the plane mutex. 
+ */ + ret = drm_modeset_lock_all_crtcs(state->dev, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + plane_state = plane->funcs->atomic_duplicate_state(plane); + if (!plane_state) + return ERR_PTR(-ENOMEM); + + state->plane_states[index] = plane_state; + state->planes[index] = plane; + plane_state->state = state; + + DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n", + plane->base.id, plane_state, state); + + if (plane_state->crtc) { + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, + plane_state->crtc); + if (IS_ERR(crtc_state)) + return ERR_CAST(crtc_state); + } + + return plane_state; +} +EXPORT_SYMBOL(drm_atomic_get_plane_state); + +/** + * drm_atomic_get_connector_state - get connector state + * @state: global atomic state object + * @connector: connector to get state object for + * + * This function returns the connector state for the given connector, + * allocating it if needed. It will also grab the relevant connector lock to + * make sure that the state is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. + */ +struct drm_connector_state * +drm_atomic_get_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int ret, index; + struct drm_mode_config *config = &connector->dev->mode_config; + struct drm_connector_state *connector_state; + + index = drm_connector_index(connector); + + if (state->connector_states[index]) + return state->connector_states[index]; + + ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + connector_state = connector->funcs->atomic_duplicate_state(connector); + if (!connector_state) + return ERR_PTR(-ENOMEM); + + state->connector_states[index] = connector_state; + state->connectors[index] = connector; + connector_state->state = state; + + DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n", + connector->base.id, connector_state, state); + + if (connector_state->crtc) { + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, + connector_state->crtc); + if (IS_ERR(crtc_state)) + return ERR_CAST(crtc_state); + } + + return connector_state; +} +EXPORT_SYMBOL(drm_atomic_get_connector_state); + +/** + * drm_atomic_set_crtc_for_plane - set crtc for plane + * @plane_state: atomic state object for the plane + * @crtc: crtc to use for the plane + * + * Changing the assigned crtc for a plane requires us to grab the lock and state + * for the new crtc, as needed. This function takes care of all these details + * besides updating the pointer in the state object itself. + * + * Returns: + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK + * then the w/w mutex code has detected a deadlock and the entire atomic + * sequence must be restarted. All other errors are fatal. 
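+ *
+ * A minimal calling sketch (hypothetical driver code; @state, @plane and
+ * @crtc come from the surrounding atomic update):
+ *
+ *	plane_state = drm_atomic_get_plane_state(state, plane);
+ *	if (IS_ERR(plane_state))
+ *		return PTR_ERR(plane_state);
+ *
+ *	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ *	if (ret != 0)
+ *		return ret;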
 */ +int +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc) +{ + struct drm_crtc_state *crtc_state; + + if (crtc) { + crtc_state = drm_atomic_get_crtc_state(plane_state->state, + crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + plane_state->crtc = crtc; + + if (crtc) + DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n", + plane_state, crtc->base.id); + else + DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state); + + return 0; +} +EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); + +/** + * drm_atomic_set_fb_for_plane - set framebuffer for plane + * @plane_state: atomic state object for the plane + * @fb: fb to use for the plane + * + * Changing the assigned framebuffer for a plane requires us to grab a reference + * to the new fb and drop the reference to the old fb, if there is one. This + * function takes care of all these details besides updating the pointer in the + * state object itself. + */ +void +drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, + struct drm_framebuffer *fb) +{ + if (plane_state->fb) + drm_framebuffer_unreference(plane_state->fb); + if (fb) + drm_framebuffer_reference(fb); + plane_state->fb = fb; + + if (fb) + DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n", + fb->base.id, plane_state); + else + DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state); +} +EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); + +/** + * drm_atomic_set_crtc_for_connector - set crtc for connector + * @conn_state: atomic state object for the connector + * @crtc: crtc to use for the connector + * + * Changing the assigned crtc for a connector requires us to grab the lock and + * state for the new crtc, as needed. This function takes care of all these + * details besides updating the pointer in the state object itself. + * + * Returns: + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK + * then the w/w mutex code has detected a deadlock and the entire atomic + * sequence must be restarted. All other errors are fatal. + */ +int +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc) +{ + struct drm_crtc_state *crtc_state; + + if (crtc) { + crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + conn_state->crtc = crtc; + + if (crtc) + DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n", + conn_state, crtc->base.id); + else + DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n", + conn_state); + + return 0; +} +EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); + +/** + * drm_atomic_add_affected_connectors - add connectors for crtc + * @state: atomic state + * @crtc: DRM crtc + * + * This function walks the current configuration and adds all connectors + * currently using @crtc to the atomic configuration @state. Note that this + * function must acquire the connection mutex. This can potentially cause + * unneeded serialization if the update is just for the planes on one crtc. Hence + * drivers and helpers should only call this when really needed (e.g. when a + * full modeset needs to happen due to some change). + * + * Returns: + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK + * then the w/w mutex code has detected a deadlock and the entire atomic + * sequence must be restarted. All other errors are fatal. 
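+ *
+ * An illustrative caller, modelled on the full-modeset handling in the
+ * atomic helpers (hypothetical code, error unwinding elided):
+ *
+ *	if (crtc_state->mode_changed) {
+ *		ret = drm_atomic_add_affected_connectors(state, crtc);
+ *		if (ret != 0)
+ *			return ret;
+ *	}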
 */ +int +drm_atomic_add_affected_connectors(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + struct drm_mode_config *config = &state->dev->mode_config; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + int ret; + + ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); + if (ret) + return ret; + + DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n", + crtc->base.id, state); + + /* + * Changed connectors are already in @state, so we only need to look at + * the current configuration. + */ + list_for_each_entry(connector, &config->connector_list, head) { + if (connector->state->crtc != crtc) + continue; + + conn_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_add_affected_connectors); + +/** + * drm_atomic_connectors_for_crtc - count number of connected outputs + * @state: atomic state + * @crtc: DRM crtc + * + * This function counts all connectors which will be connected to @crtc + * according to @state. Useful to recompute the enable state for @crtc. + */ +int +drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + int nconnectors = state->dev->mode_config.num_connector; + int i, num_connected_connectors = 0; + + for (i = 0; i < nconnectors; i++) { + struct drm_connector_state *conn_state; + + conn_state = state->connector_states[i]; + + if (conn_state && conn_state->crtc == crtc) + num_connected_connectors++; + } + + DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n", + state, num_connected_connectors, crtc->base.id); + + return num_connected_connectors; +} +EXPORT_SYMBOL(drm_atomic_connectors_for_crtc); + +/** + * drm_atomic_legacy_backoff - locking backoff for legacy ioctls + * @state: atomic state + * + * This function should be used by legacy entry points which don't understand + * -EDEADLK semantics. For simplicity this one will grab all modeset locks after + * the slowpath has completed. + */ +void drm_atomic_legacy_backoff(struct drm_atomic_state *state) +{ + int ret; + +retry: + drm_modeset_backoff(state->acquire_ctx); + + ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, + state->acquire_ctx); + if (ret) + goto retry; + ret = drm_modeset_lock_all_crtcs(state->dev, + state->acquire_ctx); + if (ret) + goto retry; +} +EXPORT_SYMBOL(drm_atomic_legacy_backoff); + +/** + * drm_atomic_check_only - check whether a given config would work + * @state: atomic configuration to check + * + * Note that this function can return -EDEADLK if the driver needed to acquire + * more locks but encountered a deadlock. The caller must then do the usual w/w + * backoff dance and restart. All other errors are fatal. + * + * Returns: + * 0 on success, negative error code on failure. + */ +int drm_atomic_check_only(struct drm_atomic_state *state) +{ + struct drm_mode_config *config = &state->dev->mode_config; + + DRM_DEBUG_KMS("checking %p\n", state); + + if (config->funcs->atomic_check) + return config->funcs->atomic_check(state->dev, state); + else + return 0; +} +EXPORT_SYMBOL(drm_atomic_check_only); + +/** + * drm_atomic_commit - commit configuration atomically + * @state: atomic configuration to commit + * + * Note that this function can return -EDEADLK if the driver needed to acquire + * more locks but encountered a deadlock. The caller must then do the usual w/w + * backoff dance and restart. All other errors are fatal. 
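+ *
+ * Sketched out, that dance looks roughly like the legacy helpers in
+ * drm_atomic_helper.c (illustrative only):
+ *
+ * retry:
+ *	... build up the atomic state ...
+ *	ret = drm_atomic_commit(state);
+ *	if (ret == -EDEADLK) {
+ *		drm_atomic_legacy_backoff(state);
+ *		drm_atomic_state_clear(state);
+ *		goto retry;
+ *	}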
 + * + * Also note that on successful execution ownership of @state is transferred + * from the caller of this function to the function itself. The caller must not + * free or in any other way access @state. If the function fails then the caller + * must clean up @state itself. + * + * Returns: + * 0 on success, negative error code on failure. + */ +int drm_atomic_commit(struct drm_atomic_state *state) +{ + struct drm_mode_config *config = &state->dev->mode_config; + int ret; + + ret = drm_atomic_check_only(state); + if (ret) + return ret; + + DRM_DEBUG_KMS("committing %p\n", state); + + return config->funcs->atomic_commit(state->dev, state, false); +} +EXPORT_SYMBOL(drm_atomic_commit); + +/** + * drm_atomic_async_commit - atomic&async configuration commit + * @state: atomic configuration to commit + * + * Note that this function can return -EDEADLK if the driver needed to acquire + * more locks but encountered a deadlock. The caller must then do the usual w/w + * backoff dance and restart. All other errors are fatal. + * + * Also note that on successful execution ownership of @state is transferred + * from the caller of this function to the function itself. The caller must not + * free or in any other way access @state. If the function fails then the caller + * must clean up @state itself. + * + * Returns: + * 0 on success, negative error code on failure. + */ +int drm_atomic_async_commit(struct drm_atomic_state *state) +{ + struct drm_mode_config *config = &state->dev->mode_config; + int ret; + + ret = drm_atomic_check_only(state); + if (ret) + return ret; + + DRM_DEBUG_KMS("committing %p asynchronously\n", state); + + return config->funcs->atomic_commit(state->dev, state, true); +} +EXPORT_SYMBOL(drm_atomic_async_commit); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c new file mode 100644 index 000000000000..ca839bd9bb0d --- /dev/null +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -0,0 +1,1906 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
 + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_atomic_helper.h> +#include <linux/fence.h> + +/** + * DOC: overview + * + * This helper library provides implementations of check and commit functions on + * top of the CRTC modeset helper callbacks and the plane helper callbacks. It + * also provides convenience implementations for the atomic state handling + * callbacks for drivers which don't need to subclass the drm core structures to + * add their own additional internal state. + * + * This library also provides default implementations for the check callback in + * drm_atomic_helper_check and for the commit callback with + * drm_atomic_helper_commit. But the individual stages and callbacks are exposed + * to allow drivers to mix and match and e.g. use the plane helpers only + * together with a driver private modeset implementation. + * + * This library also provides implementations for all the legacy driver + * interfaces on top of the atomic interface. See drm_atomic_helper_set_config, + * drm_atomic_helper_update_plane, drm_atomic_helper_disable_plane and the + * various functions to implement set_property callbacks. New drivers must not + * implement these functions themselves but must use the provided helpers. + */ +static void +drm_atomic_helper_plane_changed(struct drm_atomic_state *state, + struct drm_plane_state *plane_state, + struct drm_plane *plane) +{ + struct drm_crtc_state *crtc_state; + + if (plane->state->crtc) { + crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)]; + + if (WARN_ON(!crtc_state)) + return; + + crtc_state->planes_changed = true; + } + + if (plane_state->crtc) { + crtc_state = + state->crtc_states[drm_crtc_index(plane_state->crtc)]; + + if (WARN_ON(!crtc_state)) + return; + + crtc_state->planes_changed = true; + } +} + +static struct drm_crtc * +get_current_crtc_for_encoder(struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_connector *connector; + + WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); + + list_for_each_entry(connector, &config->connector_list, head) { + if (connector->state->best_encoder != encoder) + continue; + + return connector->state->crtc; + } + + return NULL; +} + +static int +steal_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder, + struct drm_crtc *encoder_crtc) +{ + struct drm_mode_config *config = &state->dev->mode_config; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int ret; + + /* + * We can only steal an encoder coming from a connector, which means we + * must already hold the connection_mutex. 
+ */ + WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); + + DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", + encoder->base.id, encoder->name, + encoder_crtc->base.id); + + crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->mode_changed = true; + + list_for_each_entry(connector, &config->connector_list, head) { + if (connector->state->best_encoder != encoder) + continue; + + DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + connector_state = drm_atomic_get_connector_state(state, + connector); + if (IS_ERR(connector_state)) + return PTR_ERR(connector_state); + + ret = drm_atomic_set_crtc_for_connector(connector_state, NULL); + if (ret) + return ret; + connector_state->best_encoder = NULL; + } + + return 0; +} + +static int +update_connector_routing(struct drm_atomic_state *state, int conn_idx) +{ + struct drm_connector_helper_funcs *funcs; + struct drm_encoder *new_encoder; + struct drm_crtc *encoder_crtc; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + struct drm_crtc_state *crtc_state; + int idx, ret; + + connector = state->connectors[conn_idx]; + connector_state = state->connector_states[conn_idx]; + + if (!connector) + return 0; + + DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + if (connector->state->crtc != connector_state->crtc) { + if (connector->state->crtc) { + idx = drm_crtc_index(connector->state->crtc); + + crtc_state = state->crtc_states[idx]; + crtc_state->mode_changed = true; + } + + if (connector_state->crtc) { + idx = drm_crtc_index(connector_state->crtc); + + crtc_state = state->crtc_states[idx]; + crtc_state->mode_changed = true; + } + } + + if (!connector_state->crtc) { + DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + + connector_state->best_encoder = NULL; + + return 0; + } + + funcs = connector->helper_private; + new_encoder = funcs->best_encoder(connector); + + if (!new_encoder) { + DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + return -EINVAL; + } + + if (new_encoder == connector_state->best_encoder) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + connector_state->crtc->base.id); + + return 0; + } + + encoder_crtc = get_current_crtc_for_encoder(state->dev, + new_encoder); + + if (encoder_crtc) { + ret = steal_encoder(state, new_encoder, encoder_crtc); + if (ret) { + DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + return ret; + } + } + + connector_state->best_encoder = new_encoder; + idx = drm_crtc_index(connector_state->crtc); + + crtc_state = state->crtc_states[idx]; + crtc_state->mode_changed = true; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + connector_state->crtc->base.id); + + return 0; +} + +static int +mode_fixup(struct drm_atomic_state *state) +{ + int ncrtcs = state->dev->mode_config.num_crtc; + int nconnectors = state->dev->mode_config.num_connector; + struct drm_crtc_state *crtc_state; + struct drm_connector_state *conn_state; + int i; + bool ret; + + for (i = 0; i < ncrtcs; i++) { + crtc_state = 
state->crtc_states[i]; + + if (!crtc_state || !crtc_state->mode_changed) + continue; + + drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode); + } + + for (i = 0; i < nconnectors; i++) { + struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + + conn_state = state->connector_states[i]; + + if (!conn_state) + continue; + + WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc); + + if (!conn_state->crtc || !conn_state->best_encoder) + continue; + + crtc_state = + state->crtc_states[drm_crtc_index(conn_state->crtc)]; + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call ->mode_fixup twice. + */ + encoder = conn_state->best_encoder; + funcs = encoder->helper_private; + + if (encoder->bridge && encoder->bridge->funcs->mode_fixup) { + ret = encoder->bridge->funcs->mode_fixup( + encoder->bridge, &crtc_state->mode, + &crtc_state->adjusted_mode); + if (!ret) { + DRM_DEBUG_KMS("Bridge fixup failed\n"); + return -EINVAL; + } + } + + + ret = funcs->mode_fixup(encoder, &crtc_state->mode, + &crtc_state->adjusted_mode); + if (!ret) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n", + encoder->base.id, encoder->name); + return -EINVAL; + } + } + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc_helper_funcs *funcs; + struct drm_crtc *crtc; + + crtc_state = state->crtc_states[i]; + crtc = state->crtcs[i]; + + if (!crtc_state || !crtc_state->mode_changed) + continue; + + funcs = crtc->helper_private; + ret = funcs->mode_fixup(crtc, &crtc_state->mode, + &crtc_state->adjusted_mode); + if (!ret) { + DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n", + crtc->base.id); + return -EINVAL; + } + } + + return 0; +} + +static int +drm_atomic_helper_check_prepare(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ncrtcs = dev->mode_config.num_crtc; + int nconnectors = dev->mode_config.num_connector; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i, ret; + + for (i = 0; i < ncrtcs; i++) { + crtc = state->crtcs[i]; + crtc_state = state->crtc_states[i]; + + if (!crtc) + continue; + + if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) { + DRM_DEBUG_KMS("[CRTC:%d] mode changed\n", + crtc->base.id); + crtc_state->mode_changed = true; + } + + if (crtc->state->enable != crtc_state->enable) { + DRM_DEBUG_KMS("[CRTC:%d] enable changed\n", + crtc->base.id); + crtc_state->mode_changed = true; + } + } + + for (i = 0; i < nconnectors; i++) { + /* + * This only sets crtc->mode_changed for routing changes; + * drivers must set crtc->mode_changed themselves when connector + * properties need to be updated. + */ + ret = update_connector_routing(state, i); + if (ret) + return ret; + } + + /* + * After all the routing has been prepared we need to add in any + * connector which is itself unchanged, but whose crtc changes its + * configuration. This must be done before calling mode_fixup in case a + * crtc only changed its mode but has the same set of connectors. + */ + for (i = 0; i < ncrtcs; i++) { + int num_connectors; + + crtc = state->crtcs[i]; + crtc_state = state->crtc_states[i]; + + if (!crtc || !crtc_state->mode_changed) + continue; + + DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n", + crtc->base.id, + crtc_state->enable ? 
'y' : 'n'); + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret != 0) + return ret; + + num_connectors = drm_atomic_connectors_for_crtc(state, + crtc); + + if (crtc_state->enable != !!num_connectors) { + DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n", + crtc->base.id); + + return -EINVAL; + } + } + + return mode_fixup(state); +} + +/** + * drm_atomic_helper_check - validate state object + * @dev: DRM device + * @state: the driver state object + * + * Check the state object to see if the requested state is physically possible. + * Only crtcs and planes have check callbacks, so for any additional (global) + * checking that a driver needs it can simply wrap that around this function. + * Drivers without such needs can directly use this as their ->atomic_check() + * callback. + * + * RETURNS + * Zero for success or -errno + */ +int drm_atomic_helper_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int nplanes = dev->mode_config.num_total_plane; + int ncrtcs = dev->mode_config.num_crtc; + int i, ret = 0; + + ret = drm_atomic_helper_check_prepare(dev, state); + if (ret) + return ret; + + for (i = 0; i < nplanes; i++) { + struct drm_plane_helper_funcs *funcs; + struct drm_plane *plane = state->planes[i]; + struct drm_plane_state *plane_state = state->plane_states[i]; + + if (!plane) + continue; + + funcs = plane->helper_private; + + drm_atomic_helper_plane_changed(state, plane_state, plane); + + if (!funcs || !funcs->atomic_check) + continue; + + ret = funcs->atomic_check(plane, plane_state); + if (ret) { + DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n", + plane->base.id); + return ret; + } + } + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc_helper_funcs *funcs; + struct drm_crtc *crtc = state->crtcs[i]; + + if (!crtc) + continue; + + funcs = crtc->helper_private; + + if (!funcs || !funcs->atomic_check) + continue; + + ret = funcs->atomic_check(crtc, state->crtc_states[i]); + if (ret) { + DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n", + crtc->base.id); + return ret; + } + } + + return ret; +} +EXPORT_SYMBOL(drm_atomic_helper_check); + +static void +disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) +{ + int ncrtcs = old_state->dev->mode_config.num_crtc; + int nconnectors = old_state->dev->mode_config.num_connector; + int i; + + for (i = 0; i < nconnectors; i++) { + struct drm_connector_state *old_conn_state; + struct drm_connector *connector; + struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + + old_conn_state = old_state->connector_states[i]; + connector = old_state->connectors[i]; + + /* Shut down everything that's in the changeset and currently + * still on. So we need to check the old, saved state. */ + if (!old_conn_state || !old_conn_state->crtc) + continue; + + encoder = connector->state->best_encoder; + + if (!encoder) + continue; + + funcs = encoder->helper_private; + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call disable hooks twice. + */ + if (encoder->bridge) + encoder->bridge->funcs->disable(encoder->bridge); + + /* Right function depends upon target state. 
*/ + if (connector->state->crtc) + funcs->prepare(encoder); + else if (funcs->disable) + funcs->disable(encoder); + else + funcs->dpms(encoder, DRM_MODE_DPMS_OFF); + + if (encoder->bridge) + encoder->bridge->funcs->post_disable(encoder->bridge); + } + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc_helper_funcs *funcs; + struct drm_crtc *crtc; + + crtc = old_state->crtcs[i]; + + /* Shut down everything that needs a full modeset. */ + if (!crtc || !crtc->state->mode_changed) + continue; + + funcs = crtc->helper_private; + + /* Right function depends upon target state. */ + if (crtc->state->enable) + funcs->prepare(crtc); + else if (funcs->disable) + funcs->disable(crtc); + else + funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + } +} + +static void +set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state) +{ + int nconnectors = dev->mode_config.num_connector; + int ncrtcs = old_state->dev->mode_config.num_crtc; + int i; + + /* clear out existing links */ + for (i = 0; i < nconnectors; i++) { + struct drm_connector *connector; + + connector = old_state->connectors[i]; + + if (!connector || !connector->encoder) + continue; + + WARN_ON(!connector->encoder->crtc); + + connector->encoder->crtc = NULL; + connector->encoder = NULL; + } + + /* set new links */ + for (i = 0; i < nconnectors; i++) { + struct drm_connector *connector; + + connector = old_state->connectors[i]; + + if (!connector || !connector->state->crtc) + continue; + + if (WARN_ON(!connector->state->best_encoder)) + continue; + + connector->encoder = connector->state->best_encoder; + connector->encoder->crtc = connector->state->crtc; + } + + /* set legacy state in the crtc structure */ + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc; + + crtc = old_state->crtcs[i]; + + if (!crtc) + continue; + + crtc->mode = crtc->state->mode; + crtc->enabled = crtc->state->enable; + crtc->x = crtc->primary->state->src_x >> 16; + crtc->y = crtc->primary->state->src_y >> 16; + } +} + +static void +crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) +{ + int ncrtcs = old_state->dev->mode_config.num_crtc; + int nconnectors = old_state->dev->mode_config.num_connector; + int i; + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc_helper_funcs *funcs; + struct drm_crtc *crtc; + + crtc = old_state->crtcs[i]; + + if (!crtc || !crtc->state->mode_changed) + continue; + + funcs = crtc->helper_private; + + if (crtc->state->enable) + funcs->mode_set_nofb(crtc); + } + + for (i = 0; i < nconnectors; i++) { + struct drm_connector *connector; + struct drm_crtc_state *new_crtc_state; + struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + struct drm_display_mode *mode, *adjusted_mode; + + connector = old_state->connectors[i]; + + if (!connector || !connector->state->best_encoder) + continue; + + encoder = connector->state->best_encoder; + funcs = encoder->helper_private; + new_crtc_state = connector->state->crtc->state; + mode = &new_crtc_state->mode; + adjusted_mode = &new_crtc_state->adjusted_mode; + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call mode_set hooks twice. 
 + */ + funcs->mode_set(encoder, mode, adjusted_mode); + + if (encoder->bridge && encoder->bridge->funcs->mode_set) + encoder->bridge->funcs->mode_set(encoder->bridge, + mode, adjusted_mode); + } +} + +/** + * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates + * @dev: DRM device + * @state: atomic state + * + * This function commits the modeset changes that need to be committed before + * updating planes. It shuts down all the outputs that need to be shut down and + * prepares them (if required) with the new mode. + */ +void drm_atomic_helper_commit_pre_planes(struct drm_device *dev, + struct drm_atomic_state *state) +{ + disable_outputs(dev, state); + set_routing_links(dev, state); + crtc_set_mode(dev, state); +} +EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes); + +/** + * drm_atomic_helper_commit_post_planes - modeset commit after plane updates + * @dev: DRM device + * @old_state: atomic state object with old state structures + * + * This function commits the modeset changes that need to be committed after + * updating planes: It enables all the outputs with the new configuration which + * had to be turned off for the update. + */ +void drm_atomic_helper_commit_post_planes(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + int ncrtcs = old_state->dev->mode_config.num_crtc; + int nconnectors = old_state->dev->mode_config.num_connector; + int i; + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc_helper_funcs *funcs; + struct drm_crtc *crtc; + + crtc = old_state->crtcs[i]; + + /* Need to filter out CRTCs where only planes change. */ + if (!crtc || !crtc->state->mode_changed) + continue; + + funcs = crtc->helper_private; + + if (crtc->state->enable) + funcs->commit(crtc); + } + + for (i = 0; i < nconnectors; i++) { + struct drm_connector *connector; + struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + + connector = old_state->connectors[i]; + + if (!connector || !connector->state->best_encoder) + continue; + + encoder = connector->state->best_encoder; + funcs = encoder->helper_private; + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call enable hooks twice. + */ + if (encoder->bridge) + encoder->bridge->funcs->pre_enable(encoder->bridge); + + funcs->commit(encoder); + + if (encoder->bridge) + encoder->bridge->funcs->enable(encoder->bridge); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes); + +static void wait_for_fences(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int nplanes = dev->mode_config.num_total_plane; + int i; + + for (i = 0; i < nplanes; i++) { + struct drm_plane *plane = state->planes[i]; + + if (!plane || !plane->state->fence) + continue; + + WARN_ON(!plane->state->fb); + + fence_wait(plane->state->fence, false); + fence_put(plane->state->fence); + plane->state->fence = NULL; + } +} + +static void +wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + int ncrtcs = old_state->dev->mode_config.num_crtc; + int i, ret; + + for (i = 0; i < ncrtcs; i++) { + crtc = old_state->crtcs[i]; + old_crtc_state = old_state->crtc_states[i]; + + if (!crtc) + continue; + + /* No one cares about the old state, so abuse it for tracking + * and store whether we hold a vblank reference (and should do a + * vblank wait) in the ->enable boolean. 
*/ + old_crtc_state->enable = false; + + if (!crtc->state->enable) + continue; + + ret = drm_crtc_vblank_get(crtc); + if (ret != 0) + continue; + + old_crtc_state->enable = true; + old_crtc_state->last_vblank_count = drm_vblank_count(dev, i); + } + + for (i = 0; i < ncrtcs; i++) { + crtc = old_state->crtcs[i]; + old_crtc_state = old_state->crtc_states[i]; + + if (!crtc || !old_crtc_state->enable) + continue; + + ret = wait_event_timeout(dev->vblank[i].queue, + old_crtc_state->last_vblank_count != + drm_vblank_count(dev, i), + msecs_to_jiffies(50)); + + drm_crtc_vblank_put(crtc); + } +} + +/** + * drm_atomic_helper_commit - commit validated state object + * @dev: DRM device + * @state: the driver state object + * @async: asynchronous commit + * + * This function commits a state object that has been pre-validated with + * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer + * reservation fails. For now this doesn't implement asynchronous commits. + * + * RETURNS + * Zero for success or -errno. + */ +int drm_atomic_helper_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool async) +{ + int ret; + + if (async) + return -EBUSY; + + ret = drm_atomic_helper_prepare_planes(dev, state); + if (ret) + return ret; + + /* + * This is the point of no return - everything below never fails except + * when the hw goes bonghits. Which means we can commit the new state on + * the software side now. + */ + + drm_atomic_helper_swap_state(dev, state); + + /* + * Everything below can be run asynchronously without the need to grab + * any modeset locks at all under one condition: It must be guaranteed + * that the asynchronous work has either been cancelled (if the driver + * supports it, which at least requires that the framebuffers get + * cleaned up with drm_atomic_helper_cleanup_planes()) or completed + * before the new state gets committed on the software side with + * drm_atomic_helper_swap_state(). + * + * This scheme allows new atomic state updates to be prepared and + * checked in parallel to the asynchronous completion of the previous + * update. Which is important since compositors need to figure out the + * composition of the next frame right after having submitted the + * current layout. + */ + + wait_for_fences(dev, state); + + drm_atomic_helper_commit_pre_planes(dev, state); + + drm_atomic_helper_commit_planes(dev, state); + + drm_atomic_helper_commit_post_planes(dev, state); + + wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); + + drm_atomic_state_free(state); + + return 0; +} +EXPORT_SYMBOL(drm_atomic_helper_commit); + +/** + * DOC: implementing async commit + * + * For now the atomic helpers don't support async commit directly. If there is + * real need it could be added though, using the dma-buf fence infrastructure + * for generic synchronization with outstanding rendering. + * + * For now drivers have to implement async commit themselves, with the following + * sequence being the recommended one: + * + * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function + * which commit needs to call which can fail, so we want to run it first and + * synchronously. + * + * 2. Synchronize with any outstanding asynchronous commit worker threads which + * might be affected by the new state update. This can be done by either + * cancelling or flushing the work items, depending upon whether the driver can + * deal with cancelled updates. 
Note that it is important to ensure that the framebuffer + * cleanup is still done when cancelling. + * + * For sufficient parallelism it is recommended to have a work item per crtc + * (for updates which don't touch global state) and a global one. Then we only + * need to synchronize with the crtc work items for changed crtcs and the global + * work item, which allows nice concurrent updates on disjoint sets of crtcs. + * + * 3. The software state is updated synchronously with + * drm_atomic_helper_swap_state. Doing this under the protection of all modeset + * locks means concurrent callers never see inconsistent state. And doing this + * while it's guaranteed that no relevant async worker runs means that async + * workers do not need to grab any locks. Actually they must not grab locks, for + * otherwise the work flushing will deadlock. + * + * 4. Schedule a work item to do all subsequent steps, using the split-out + * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and + * then cleaning up the framebuffers after the old framebuffer is no longer + * being displayed. + */ + +/** + * drm_atomic_helper_prepare_planes - prepare plane resources before commit + * @dev: DRM device + * @state: atomic state object with new state structures + * + * This function prepares plane state, specifically framebuffers, for the new + * configuration. If any failure is encountered this function will call + * ->cleanup_fb on any already successfully prepared framebuffer. + * + * Returns: + * 0 on success, negative error code on failure. + */ +int drm_atomic_helper_prepare_planes(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int nplanes = dev->mode_config.num_total_plane; + int ret, i; + + for (i = 0; i < nplanes; i++) { + struct drm_plane_helper_funcs *funcs; + struct drm_plane *plane = state->planes[i]; + struct drm_framebuffer *fb; + + if (!plane) + continue; + + funcs = plane->helper_private; + + fb = state->plane_states[i]->fb; + + if (fb && funcs->prepare_fb) { + ret = funcs->prepare_fb(plane, fb); + if (ret) + goto fail; + } + } + + return 0; + +fail: + for (i--; i >= 0; i--) { + struct drm_plane_helper_funcs *funcs; + struct drm_plane *plane = state->planes[i]; + struct drm_framebuffer *fb; + + if (!plane) + continue; + + funcs = plane->helper_private; + + fb = state->plane_states[i]->fb; + + if (fb && funcs->cleanup_fb) + funcs->cleanup_fb(plane, fb); + + } + + return ret; +} +EXPORT_SYMBOL(drm_atomic_helper_prepare_planes); + +/** + * drm_atomic_helper_commit_planes - commit plane state + * @dev: DRM device + * @state: atomic state + * + * This function commits the new plane state using the plane and atomic helper + * functions for planes and crtcs. It assumes that the atomic state has already + * been pushed into the relevant object state pointers, since this step can no + * longer fail. + * + * It still requires the global state object @state to know which planes and + * crtcs need to be updated though. 
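+ *
+ * For illustration, drm_atomic_helper_commit() in this file drives the three
+ * commit phases in this order:
+ *
+ *	drm_atomic_helper_commit_pre_planes(dev, state);
+ *	drm_atomic_helper_commit_planes(dev, state);
+ *	drm_atomic_helper_commit_post_planes(dev, state);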
 + */ +void drm_atomic_helper_commit_planes(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int nplanes = dev->mode_config.num_total_plane; + int ncrtcs = dev->mode_config.num_crtc; + int i; + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc_helper_funcs *funcs; + struct drm_crtc *crtc = state->crtcs[i]; + + if (!crtc) + continue; + + funcs = crtc->helper_private; + + if (!funcs || !funcs->atomic_begin) + continue; + + funcs->atomic_begin(crtc); + } + + for (i = 0; i < nplanes; i++) { + struct drm_plane_helper_funcs *funcs; + struct drm_plane *plane = state->planes[i]; + + if (!plane) + continue; + + funcs = plane->helper_private; + + if (!funcs || !funcs->atomic_update) + continue; + + funcs->atomic_update(plane); + } + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc_helper_funcs *funcs; + struct drm_crtc *crtc = state->crtcs[i]; + + if (!crtc) + continue; + + funcs = crtc->helper_private; + + if (!funcs || !funcs->atomic_flush) + continue; + + funcs->atomic_flush(crtc); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_planes); + +/** + * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit + * @dev: DRM device + * @old_state: atomic state object with old state structures + * + * This function cleans up plane state, specifically framebuffers, from the old + * configuration. Hence the old configuration must be preserved in @old_state to + * be able to call this function. + * + * This function must also be called on the new state when the atomic update + * fails at any point after calling drm_atomic_helper_prepare_planes(). + */ +void drm_atomic_helper_cleanup_planes(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + int nplanes = dev->mode_config.num_total_plane; + int i; + + for (i = 0; i < nplanes; i++) { + struct drm_plane_helper_funcs *funcs; + struct drm_plane *plane = old_state->planes[i]; + struct drm_framebuffer *old_fb; + + if (!plane) + continue; + + funcs = plane->helper_private; + + old_fb = old_state->plane_states[i]->fb; + + if (old_fb && funcs->cleanup_fb) + funcs->cleanup_fb(plane, old_fb); + } +} +EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); + +/** + * drm_atomic_helper_swap_state - store atomic state into current sw state + * @dev: DRM device + * @state: atomic state + * + * This function stores the atomic state into the current state pointers in all + * driver objects. It should be called after all potentially failing steps have + * been done and succeeded, but before the actual hardware state is committed. + * + * For cleanup and error recovery the current state for all changed objects will + * be swapped into @state. + * + * With that sequence it fits perfectly into the plane prepare/cleanup sequence: + * + * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state. + * + * 2. Do any other steps that might fail. + * + * 3. Put the staged state into the current state pointers with this function. + * + * 4. Actually commit the hardware state. + * + * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3 + * contains the old state. Also do any other cleanup required with that state. 
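+ *
+ * Condensed into code, that sequence reads roughly as (illustrative sketch,
+ * error unwinding elided):
+ *
+ *	ret = drm_atomic_helper_prepare_planes(dev, state);
+ *	if (ret)
+ *		return ret;
+ *	... any other steps that might fail ...
+ *	drm_atomic_helper_swap_state(dev, state);
+ *	... commit the actual hardware state ...
+ *	drm_atomic_helper_cleanup_planes(dev, state);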
 */ +void drm_atomic_helper_swap_state(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int i; + + for (i = 0; i < dev->mode_config.num_connector; i++) { + struct drm_connector *connector = state->connectors[i]; + + if (!connector) + continue; + + connector->state->state = state; + swap(state->connector_states[i], connector->state); + connector->state->state = NULL; + } + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + + if (!crtc) + continue; + + crtc->state->state = state; + swap(state->crtc_states[i], crtc->state); + crtc->state->state = NULL; + } + + for (i = 0; i < dev->mode_config.num_total_plane; i++) { + struct drm_plane *plane = state->planes[i]; + + if (!plane) + continue; + + plane->state->state = state; + swap(state->plane_states[i], plane->state); + plane->state->state = NULL; + } +} +EXPORT_SYMBOL(drm_atomic_helper_swap_state); + +/** + * drm_atomic_helper_update_plane - Helper for primary plane update using atomic + * @plane: plane object to update + * @crtc: owning CRTC of the plane + * @fb: framebuffer to flip onto plane + * @crtc_x: x offset of primary plane on crtc + * @crtc_y: y offset of primary plane on crtc + * @crtc_w: width of primary plane rectangle on crtc + * @crtc_h: height of primary plane rectangle on crtc + * @src_x: x offset of @fb for panning + * @src_y: y offset of @fb for panning + * @src_w: width of source rectangle in @fb + * @src_h: height of source rectangle in @fb + * + * Provides a default plane update handler using the atomic driver interface. + * + * RETURNS: + * Zero on success, error code on failure + */ +int drm_atomic_helper_update_plane(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_atomic_state *state; + struct drm_plane_state *plane_state; + int ret = 0; + + state = drm_atomic_state_alloc(plane->dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); +retry: + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto fail; + } + + ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); + if (ret != 0) + goto fail; + drm_atomic_set_fb_for_plane(plane_state, fb); + plane_state->crtc_x = crtc_x; + plane_state->crtc_y = crtc_y; + plane_state->crtc_h = crtc_h; + plane_state->crtc_w = crtc_w; + plane_state->src_x = src_x; + plane_state->src_y = src_y; + plane_state->src_h = src_h; + plane_state->src_w = src_w; + + ret = drm_atomic_commit(state); + if (ret != 0) + goto fail; + + /* Driver takes ownership of state on successful commit. */ + return 0; +fail: + if (ret == -EDEADLK) + goto backoff; + + drm_atomic_state_free(state); + + return ret; +backoff: + drm_atomic_legacy_backoff(state); + drm_atomic_state_clear(state); + + /* + * Someone might have exchanged the framebuffer while we dropped locks + * in the backoff code. We need to fix up the fb refcount tracking the + * core does for us. + */ + plane->old_fb = plane->fb; + + goto retry; +} +EXPORT_SYMBOL(drm_atomic_helper_update_plane); + +/** + * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic + * @plane: plane to disable + * + * Provides a default plane disable handler using the atomic driver interface. 
+ * + * RETURNS: + * Zero on success, error code on failure + */ +int drm_atomic_helper_disable_plane(struct drm_plane *plane) +{ + struct drm_atomic_state *state; + struct drm_plane_state *plane_state; + int ret = 0; + + state = drm_atomic_state_alloc(plane->dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = drm_modeset_legacy_acquire_ctx(plane->crtc); +retry: + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto fail; + } + + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret != 0) + goto fail; + drm_atomic_set_fb_for_plane(plane_state, NULL); + plane_state->crtc_x = 0; + plane_state->crtc_y = 0; + plane_state->crtc_h = 0; + plane_state->crtc_w = 0; + plane_state->src_x = 0; + plane_state->src_y = 0; + plane_state->src_h = 0; + plane_state->src_w = 0; + + ret = drm_atomic_commit(state); + if (ret != 0) + goto fail; + + /* Driver takes ownership of state on successful commit. */ + return 0; +fail: + if (ret == -EDEADLK) + goto backoff; + + drm_atomic_state_free(state); + + return ret; +backoff: + drm_atomic_legacy_backoff(state); + drm_atomic_state_clear(state); + + /* + * Someone might have exchanged the framebuffer while we dropped locks + * in the backoff code. We need to fix up the fb refcount tracking the + * core does for us. + */ + plane->old_fb = plane->fb; + + goto retry; +} +EXPORT_SYMBOL(drm_atomic_helper_disable_plane); + +static int update_output_state(struct drm_atomic_state *state, + struct drm_mode_set *set) +{ + struct drm_device *dev = set->crtc->dev; + struct drm_connector_state *conn_state; + int nconnectors = state->dev->mode_config.num_connector; + int ncrtcs = state->dev->mode_config.num_crtc; + int ret, i, j; + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, + state->acquire_ctx); + if (ret) + return ret; + + /* First grab all affected connector/crtc states. */ + for (i = 0; i < set->num_connectors; i++) { + conn_state = drm_atomic_get_connector_state(state, + set->connectors[i]); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); + } + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + + if (!crtc) + continue; + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret) + return ret; + } + + /* Then recompute connector->crtc links and crtc enabling state. */ + for (i = 0; i < nconnectors; i++) { + struct drm_connector *connector; + + connector = state->connectors[i]; + conn_state = state->connector_states[i]; + + if (!connector) + continue; + + if (conn_state->crtc == set->crtc) { + ret = drm_atomic_set_crtc_for_connector(conn_state, + NULL); + if (ret) + return ret; + } + + for (j = 0; j < set->num_connectors; j++) { + if (set->connectors[j] == connector) { + ret = drm_atomic_set_crtc_for_connector(conn_state, + set->crtc); + if (ret) + return ret; + break; + } + } + } + + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + struct drm_crtc_state *crtc_state = state->crtc_states[i]; + + if (!crtc) + continue; + + /* Don't update ->enable for the CRTC in the set_config request, + * since a mismatch would indicate a bug in the upper layers. + * The actual modeset code later on will catch any + * inconsistencies here. 
+		 */
+		if (crtc == set->crtc)
+			continue;
+
+		crtc_state->enable =
+			drm_atomic_connectors_for_crtc(state, crtc);
+	}
+
+	return 0;
+}
+
+/**
+ * drm_atomic_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * Provides a default crtc set_config handler using the atomic driver interface.
+ *
+ * Returns:
+ * 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_set_config(struct drm_mode_set *set)
+{
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc = set->crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane_state *primary_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	if (!set->mode) {
+		WARN_ON(set->fb);
+		WARN_ON(set->num_connectors);
+
+		crtc_state->enable = false;
+		goto commit;
+	}
+
+	WARN_ON(!set->fb);
+	WARN_ON(!set->num_connectors);
+
+	crtc_state->enable = true;
+	drm_mode_copy(&crtc_state->mode, set->mode);
+
+	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+	if (IS_ERR(primary_state)) {
+		ret = PTR_ERR(primary_state);
+		goto fail;
+	}
+
+	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
+	if (ret != 0)
+		goto fail;
+	drm_atomic_set_fb_for_plane(primary_state, set->fb);
+	primary_state->crtc_x = 0;
+	primary_state->crtc_y = 0;
+	primary_state->crtc_h = set->mode->vdisplay;
+	primary_state->crtc_w = set->mode->hdisplay;
+	primary_state->src_x = set->x << 16;
+	primary_state->src_y = set->y << 16;
+	primary_state->src_h = set->mode->vdisplay << 16;
+	primary_state->src_w = set->mode->hdisplay << 16;
+
+commit:
+	ret = update_output_state(state, set);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_commit(state);
+	if (ret != 0)
+		goto fail;
+
+	/* Driver takes ownership of state on successful commit. */
+	return 0;
+fail:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_free(state);
+
+	return ret;
+backoff:
+	drm_atomic_legacy_backoff(state);
+	drm_atomic_state_clear(state);
+
+	/*
+	 * Someone might have exchanged the framebuffer while we dropped locks
+	 * in the backoff code. We need to fix up the fb refcount tracking the
+	 * core does for us.
+	 */
+	crtc->primary->old_fb = crtc->primary->fb;
+
+	goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_set_config);
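Taken together with the plane, property and reset helpers that follow, this lets a driver service all of its legacy entry points through the atomic interface. A minimal sketch of the wiring; the foo_ names are hypothetical, and the referenced drm_atomic_helper_* hooks are the ones defined further down in this file:

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* legacy entry points, serviced via the atomic interface */
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.set_property		= drm_atomic_helper_crtc_set_property,
	.destroy		= drm_crtc_cleanup,
	/* state management defaults */
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.set_property		= drm_atomic_helper_plane_set_property,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};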
+/**
+ * drm_atomic_helper_crtc_set_property - helper for crtc properties
+ * @crtc: DRM crtc
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default crtc set_property handler using the atomic driver
+ * interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+				    struct drm_property *property,
+				    uint64_t val)
+{
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return -ENOMEM;
+
+	/* ->set_property is always called with all locks held. */
+	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
+					       property, val);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_commit(state);
+	if (ret != 0)
+		goto fail;
+
+	/* Driver takes ownership of state on successful commit. */
+	return 0;
+fail:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_free(state);
+
+	return ret;
+backoff:
+	drm_atomic_legacy_backoff(state);
+	drm_atomic_state_clear(state);
+
+	goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
+
+/**
+ * drm_atomic_helper_plane_set_property - helper for plane properties
+ * @plane: DRM plane
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default plane set_property handler using the atomic driver
+ * interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+				     struct drm_property *property,
+				     uint64_t val)
+{
+	struct drm_atomic_state *state;
+	struct drm_plane_state *plane_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state)
+		return -ENOMEM;
+
+	/* ->set_property is always called with all locks held. */
+	state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
+retry:
+	plane_state = drm_atomic_get_plane_state(state, plane);
+	if (IS_ERR(plane_state)) {
+		ret = PTR_ERR(plane_state);
+		goto fail;
+	}
+
+	ret = plane->funcs->atomic_set_property(plane, plane_state,
+						property, val);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_commit(state);
+	if (ret != 0)
+		goto fail;
+
+	/* Driver takes ownership of state on successful commit. */
+	return 0;
+fail:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_free(state);
+
+	return ret;
+backoff:
+	drm_atomic_legacy_backoff(state);
+	drm_atomic_state_clear(state);
+
+	goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
+
+/**
+ * drm_atomic_helper_connector_set_property - helper for connector properties
+ * @connector: DRM connector
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default connector set_property handler using the atomic driver
+ * interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+					 struct drm_property *property,
+					 uint64_t val)
+{
+	struct drm_atomic_state *state;
+	struct drm_connector_state *connector_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(connector->dev);
+	if (!state)
+		return -ENOMEM;
+
+	/* ->set_property is always called with all locks held. */
+	state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
+retry:
+	connector_state = drm_atomic_get_connector_state(state, connector);
+	if (IS_ERR(connector_state)) {
+		ret = PTR_ERR(connector_state);
+		goto fail;
+	}
+
+	ret = connector->funcs->atomic_set_property(connector, connector_state,
+						    property, val);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_commit(state);
+	if (ret != 0)
+		goto fail;
+
+	/* Driver takes ownership of state on successful commit. */
+	return 0;
+fail:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_free(state);
+
+	return ret;
+backoff:
+	drm_atomic_legacy_backoff(state);
+	drm_atomic_state_clear(state);
+
+	goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
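All three set_property helpers funnel into the object's ->atomic_set_property callback with the staged state, so the driver only mutates state objects and never touches the hardware directly; the subsequent drm_atomic_commit() does that. A sketch of the driver side, assuming a subclassed plane state and a driver-created foo_zpos_property (both hypothetical):

static struct drm_property *foo_zpos_property; /* hypothetical, created at init */

struct foo_plane_state {
	struct drm_plane_state base;
	unsigned int zpos;
};

static int foo_plane_atomic_set_property(struct drm_plane *plane,
					 struct drm_plane_state *state,
					 struct drm_property *property,
					 uint64_t val)
{
	struct foo_plane_state *fstate =
		container_of(state, struct foo_plane_state, base);

	if (property == foo_zpos_property) {
		/* Only the staged state is written; the commit applies it. */
		fstate->zpos = val;
		return 0;
	}

	return -EINVAL; /* unknown property */
}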
+/**
+ * drm_atomic_helper_page_flip - execute a legacy page flip
+ * @crtc: DRM crtc
+ * @fb: DRM framebuffer
+ * @event: optional DRM event to signal upon completion
+ * @flags: flip flags for non-vblank sync'ed updates
+ *
+ * Provides a default page flip implementation using the atomic driver
+ * interface.
+ *
+ * Note that for now so called async page flips (i.e. updates which are not
+ * synchronized to vblank) are not supported, since the atomic interfaces have
+ * no provisions for this yet.
+ *
+ * Returns:
+ * 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t flags)
+{
+	struct drm_plane *plane = crtc->primary;
+	struct drm_atomic_state *state;
+	struct drm_plane_state *plane_state;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+
+	if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+		return -EINVAL;
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+	crtc_state->event = event;
+
+	plane_state = drm_atomic_get_plane_state(state, plane);
+	if (IS_ERR(plane_state)) {
+		ret = PTR_ERR(plane_state);
+		goto fail;
+	}
+
+	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+	if (ret != 0)
+		goto fail;
+	drm_atomic_set_fb_for_plane(plane_state, fb);
+
+	ret = drm_atomic_async_commit(state);
+	if (ret != 0)
+		goto fail;
+
+	/* TODO: ->page_flip is the only driver callback where the core
+	 * doesn't update plane->fb. For now patch it up here. */
+	plane->fb = plane->state->fb;
+
+	/* Driver takes ownership of state on successful async commit. */
+	return 0;
+fail:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_free(state);
+
+	return ret;
+backoff:
+	drm_atomic_legacy_backoff(state);
+	drm_atomic_state_clear(state);
+
+	/*
+	 * Someone might have exchanged the framebuffer while we dropped locks
+	 * in the backoff code. We need to fix up the fb refcount tracking the
+	 * core does for us.
+	 */
+	plane->old_fb = plane->fb;
+
+	goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_page_flip);
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available, which is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ */
+
+/**
+ * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
+{
+	kfree(crtc->state);
+	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
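With the ->reset hooks from this section plugged into the funcs tables, a driver that lacks hardware state readout can bring the software state to a well-defined "everything off" baseline at load time. A sketch, with foo_driver_load hypothetical:

static int foo_driver_load(struct drm_device *dev)
{
	/* ... register CRTCs, planes and connectors ... */

	/*
	 * Invokes the ->reset hook of every mode object, e.g.
	 * drm_atomic_helper_crtc_reset() above, giving each object a
	 * freshly allocated, zeroed state.
	 */
	drm_mode_config_reset(dev);

	return 0;
}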
+/**
+ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
+ * @crtc: drm CRTC
+ *
+ * Default CRTC state duplicate hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *state;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
+
+	if (state) {
+		state->mode_changed = false;
+		state->planes_changed = false;
+		state->event = NULL;
+	}
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
+
+/**
+ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Default CRTC state destroy hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+					  struct drm_crtc_state *state)
+{
+	kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * drm_atomic_helper_plane_reset - default ->reset hook for planes
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_plane_reset(struct drm_plane *plane)
+{
+	if (plane->state && plane->state->fb)
+		drm_framebuffer_unreference(plane->state->fb);
+
+	kfree(plane->state);
+	plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
+
+/**
+ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
+ * @plane: drm plane
+ *
+ * Default plane state duplicate hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct drm_plane_state *state;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
+
+	if (state && state->fb)
+		drm_framebuffer_reference(state->fb);
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * drm_atomic_helper_plane_destroy_state - default state destroy hook
+ * @plane: drm plane
+ * @state: plane state object to release
+ *
+ * Default plane state destroy hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+					   struct drm_plane_state *state)
+{
+	if (state->fb)
+		drm_framebuffer_unreference(state->fb);
+
+	kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_reset - default ->reset hook for connectors
+ * @connector: drm connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void drm_atomic_helper_connector_reset(struct drm_connector *connector)
+{
+	kfree(connector->state);
+	connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */ +struct drm_connector_state * +drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector) +{ + if (WARN_ON(!connector->state)) + return NULL; + + return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL); +} +EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); + +/** + * drm_atomic_helper_connector_destroy_state - default state destroy hook + * @connector: drm connector + * @state: connector state object to release + * + * Default connector state destroy hook for drivers which don't have their own + * subclassed connector state structure. + */ +void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state) +{ + kfree(state); +} +EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index e79c8d3700d8..e6c169152bf1 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -766,7 +766,6 @@ static void drm_mode_remove(struct drm_connector *connector, /** * drm_connector_get_cmdline_mode - reads the user's cmdline mode * @connector: connector to quwery - * @mode: returned mode * * The kernel supports per-connector configration of its consoles through * use of the video= parameter. This function parses that option and @@ -2943,7 +2942,7 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format); * @file_priv: drm file for the ioctl call * * Add a new FB to the specified CRTC, given a user request. This is the - * original addfb ioclt which only supported RGB formats. + * original addfb ioctl which only supported RGB formats. * * Called by the user via ioctl. * @@ -2955,11 +2954,9 @@ int drm_mode_addfb(struct drm_device *dev, { struct drm_mode_fb_cmd *or = data; struct drm_mode_fb_cmd2 r = {}; - struct drm_mode_config *config = &dev->mode_config; - struct drm_framebuffer *fb; - int ret = 0; + int ret; - /* Use new struct with format internally */ + /* convert to new format and call new ioctl */ r.fb_id = or->fb_id; r.width = or->width; r.height = or->height; @@ -2967,26 +2964,11 @@ int drm_mode_addfb(struct drm_device *dev, r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); r.handles[0] = or->handle; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - if ((config->min_width > r.width) || (r.width > config->max_width)) - return -EINVAL; - - if ((config->min_height > r.height) || (r.height > config->max_height)) - return -EINVAL; - - fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); - if (IS_ERR(fb)) { - DRM_DEBUG_KMS("could not create framebuffer\n"); - return PTR_ERR(fb); - } + ret = drm_mode_addfb2(dev, &r, file_priv); + if (ret) + return ret; - mutex_lock(&file_priv->fbs_lock); - or->fb_id = fb->base.id; - list_add(&fb->filp_head, &file_priv->fbs); - DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); - mutex_unlock(&file_priv->fbs_lock); + or->fb_id = r.fb_id; return ret; } @@ -3080,7 +3062,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) num_planes = drm_format_num_planes(r->pixel_format); if (r->width == 0 || r->width % hsub) { - DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height); + DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); return -EINVAL; } @@ -3435,6 +3417,10 @@ void drm_fb_release(struct drm_file *priv) * object with drm_object_attach_property. The returned property object must be * freed with drm_property_destroy. 
 *
+ * Note that the DRM core keeps a per-device list of properties and that, if
+ * drm_mode_config_cleanup() is called, it will destroy all properties created
+ * by the driver.
+ *
  * Returns:
  * A pointer to the newly created property on success, NULL on failure.
  */
@@ -3611,7 +3597,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
  * object with drm_object_attach_property. The returned property object must be
  * freed with drm_property_destroy.
  *
- * Userspace is allowed to set any interger value in the (min, max) range
+ * Userspace is allowed to set any integer value in the (min, max) range
  * inclusive.
  *
  * Returns:
@@ -4019,6 +4005,19 @@ done:
 	return ret;
 }
 
+/**
+ * drm_mode_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
 int drm_mode_connector_set_path_property(struct drm_connector *connector,
 					 char *path)
 {
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6c65a0a28fbd..d552708409de 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,12 +34,35 @@
 #include <linux/moduleparam.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 
+/**
+ * DOC: overview
+ *
+ * The CRTC modeset helper library provides a default set_config implementation
+ * in drm_crtc_helper_set_config(), plus a few other convenience functions using
+ * the same callbacks, which drivers can use to e.g. restore the modeset
+ * configuration on resume with drm_helper_resume_force_mode().
+ *
+ * The driver callbacks are mostly compatible with the atomic modeset helpers,
+ * except for the handling of the primary plane: Atomic helpers require that the
+ * primary plane is implemented as a real standalone plane and not directly tied
+ * to the CRTC state. For easier transition this library provides functions to
+ * implement the old semantics required by the CRTC helpers using the new plane
+ * and atomic helper callbacks.
+ *
+ * Drivers are strongly urged to convert to the atomic helpers (by way of first
+ * converting to the plane helpers). New drivers must not use these functions
+ * but need to implement the atomic interface instead, potentially using the
+ * atomic helpers for that.
+ */
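In practice, a driver in the middle of such a conversion keeps its existing CRTC helper vtable and only swaps in the transitional implementations defined below, plus a ->mode_set_nofb hook of its own. A sketch, with all foo_ names hypothetical:

/* Programs timings and CRTC registers from crtc->state, without touching
 * the primary plane; the plane is handled by the atomic plane helpers. */
static void foo_crtc_mode_set_nofb(struct drm_crtc *crtc);

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_set	= drm_helper_crtc_mode_set,
	.mode_set_base	= drm_helper_crtc_mode_set_base,
	.mode_set_nofb	= foo_crtc_mode_set_nofb,
};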
 MODULE_AUTHOR("David Airlie, Jesse Barnes");
 MODULE_DESCRIPTION("DRM KMS helper");
 MODULE_LICENSE("GPL and additional rights");
@@ -888,3 +911,112 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
 	drm_modeset_unlock_all(dev);
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+/**
+ * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @mode: DRM display mode which userspace requested
+ * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set callback
+ * required by the crtc helpers. Besides the atomic plane helper functions for
+ * the primary plane, the driver must also provide the ->mode_set_nofb callback
+ * to set up the crtc.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode, int x, int y,
+			     struct drm_framebuffer *old_fb)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	int ret;
+
+	if (crtc->funcs->atomic_duplicate_state)
+		crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
+	else if (crtc->state)
+		crtc_state = kmemdup(crtc->state, sizeof(*crtc_state),
+				     GFP_KERNEL);
+	else
+		crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+	if (!crtc_state)
+		return -ENOMEM;
+
+	crtc_state->enable = true;
+	crtc_state->planes_changed = true;
+	crtc_state->mode_changed = true;
+	drm_mode_copy(&crtc_state->mode, mode);
+	drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
+
+	if (crtc_funcs->atomic_check) {
+		ret = crtc_funcs->atomic_check(crtc, crtc_state);
+		if (ret) {
+			kfree(crtc_state);
+
+			return ret;
+		}
+	}
+
+	swap(crtc->state, crtc_state);
+
+	crtc_funcs->mode_set_nofb(crtc);
+
+	if (crtc_state) {
+		if (crtc->funcs->atomic_destroy_state)
+			crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+		else
+			kfree(crtc_state);
+	}
+
+	return drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
+}
+EXPORT_SYMBOL(drm_helper_crtc_mode_set);
+
+/**
+ * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set_base callback
+ * required by the crtc helpers. The driver must provide the atomic plane helper
+ * functions for the primary plane.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */ +int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_plane_state *plane_state; + struct drm_plane *plane = crtc->primary; + + if (plane->funcs->atomic_duplicate_state) + plane_state = plane->funcs->atomic_duplicate_state(plane); + else if (plane->state) + plane_state = drm_atomic_helper_plane_duplicate_state(plane); + else + plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); + if (!plane_state) + return -ENOMEM; + + plane_state->crtc = crtc; + drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb); + plane_state->crtc_x = 0; + plane_state->crtc_y = 0; + plane_state->crtc_h = crtc->mode.vdisplay; + plane_state->crtc_w = crtc->mode.hdisplay; + plane_state->src_x = x << 16; + plane_state->src_y = y << 16; + plane_state->src_h = crtc->mode.vdisplay << 16; + plane_state->src_w = crtc->mode.hdisplay << 16; + + return drm_plane_helper_commit(plane, plane_state, old_fb); +} +EXPORT_SYMBOL(drm_helper_crtc_mode_set_base); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 08e33b8b13a4..959e2074b0d4 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -39,198 +39,6 @@ * blocks, ... */ -/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ -static int -i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, - uint8_t write_byte, uint8_t *read_byte) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int ret; - - ret = (*algo_data->aux_ch)(adapter, mode, - write_byte, read_byte); - return ret; -} - -/* - * I2C over AUX CH - */ - -/* - * Send the address. If the I2C link is running, this 'restarts' - * the connection with the new address, this is used for doing - * a write followed by a read (as needed for DDC) - */ -static int -i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int mode = MODE_I2C_START; - int ret; - - if (reading) - mode |= MODE_I2C_READ; - else - mode |= MODE_I2C_WRITE; - algo_data->address = address; - algo_data->running = true; - ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); - return ret; -} - -/* - * Stop the I2C transaction. 
This closes out the link, sending - * a bare address packet with the MOT bit turned off - */ -static void -i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int mode = MODE_I2C_STOP; - - if (reading) - mode |= MODE_I2C_READ; - else - mode |= MODE_I2C_WRITE; - if (algo_data->running) { - (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); - algo_data->running = false; - } -} - -/* - * Write a single byte to the current I2C address, the - * the I2C link must be running or this returns -EIO - */ -static int -i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int ret; - - if (!algo_data->running) - return -EIO; - - ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); - return ret; -} - -/* - * Read a single byte from the current I2C address, the - * I2C link must be running or this returns -EIO - */ -static int -i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int ret; - - if (!algo_data->running) - return -EIO; - - ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); - return ret; -} - -static int -i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, - struct i2c_msg *msgs, - int num) -{ - int ret = 0; - bool reading = false; - int m; - int b; - - for (m = 0; m < num; m++) { - u16 len = msgs[m].len; - u8 *buf = msgs[m].buf; - reading = (msgs[m].flags & I2C_M_RD) != 0; - ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading); - if (ret < 0) - break; - if (reading) { - for (b = 0; b < len; b++) { - ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]); - if (ret < 0) - break; - } - } else { - for (b = 0; b < len; b++) { - ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]); - if (ret < 0) - break; - } - } - if (ret < 0) - break; - } - if (ret >= 0) - ret = num; - i2c_algo_dp_aux_stop(adapter, reading); - DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret); - return ret; -} - -static u32 -i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) -{ - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | - I2C_FUNC_SMBUS_READ_BLOCK_DATA | - I2C_FUNC_SMBUS_BLOCK_PROC_CALL | - I2C_FUNC_10BIT_ADDR; -} - -static const struct i2c_algorithm i2c_dp_aux_algo = { - .master_xfer = i2c_algo_dp_aux_xfer, - .functionality = i2c_algo_dp_aux_functionality, -}; - -static void -i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) -{ - (void) i2c_algo_dp_aux_address(adapter, 0, false); - (void) i2c_algo_dp_aux_stop(adapter, false); -} - -static int -i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) -{ - adapter->algo = &i2c_dp_aux_algo; - adapter->retries = 3; - i2c_dp_aux_reset_bus(adapter); - return 0; -} - -/** - * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper - * @adapter: i2c adapter to register - * - * This registers an i2c adapter that uses dp aux channel as it's underlaying - * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure - * and store it in the algo_data member of the @adapter argument. This will be - * used by the i2c over dp aux algorithm to drive the hardware. - * - * RETURNS: - * 0 on success, -ERRNO on failure. - * - * IMPORTANT: - * This interface is deprecated, please switch to the new dp aux helpers and - * drm_dp_aux_register(). 
- */ -int -i2c_dp_aux_add_bus(struct i2c_adapter *adapter) -{ - int error; - - error = i2c_dp_aux_prepare_bus(adapter); - if (error) - return error; - error = i2c_add_adapter(adapter); - return error; -} -EXPORT_SYMBOL(i2c_dp_aux_add_bus); - /* Helpers for DP link training */ static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) { @@ -654,10 +462,12 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) case DP_AUX_I2C_REPLY_NACK: DRM_DEBUG_KMS("I2C nack\n"); + aux->i2c_nack_count++; return -EREMOTEIO; case DP_AUX_I2C_REPLY_DEFER: DRM_DEBUG_KMS("I2C defer\n"); + aux->i2c_defer_count++; usleep_range(400, 500); continue; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 070f913d2dba..dc98b8f78168 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1011,19 +1011,20 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, static void build_mst_prop_path(struct drm_dp_mst_port *port, struct drm_dp_mst_branch *mstb, - char *proppath) + char *proppath, + size_t proppath_size) { int i; char temp[8]; - snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id); + snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); for (i = 0; i < (mstb->lct - 1); i++) { int shift = (i % 2) ? 0 : 4; int port_num = mstb->rad[i / 2] >> shift; - snprintf(temp, 8, "-%d", port_num); - strncat(proppath, temp, 255); + snprintf(temp, sizeof(temp), "-%d", port_num); + strlcat(proppath, temp, proppath_size); } - snprintf(temp, 8, "-%d", port->port_num); - strncat(proppath, temp, 255); + snprintf(temp, sizeof(temp), "-%d", port->port_num); + strlcat(proppath, temp, proppath_size); } static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, @@ -1094,7 +1095,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath); + build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index bc3da32d4585..2e5c7d941313 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -56,7 +56,7 @@ static struct idr drm_minors_idr; struct class *drm_class; static struct dentry *drm_debugfs_root; -void drm_err(const char *func, const char *format, ...) +void drm_err(const char *format, ...) { struct va_format vaf; va_list args; @@ -66,7 +66,8 @@ void drm_err(const char *func, const char *format, ...) 
vaf.fmt = format; vaf.va = &args; - printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); + printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV", + __builtin_return_address(0), &vaf); va_end(args); } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0c0c39bac23d..09d47e9ba026 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1570,7 +1570,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) modeset = &fb_helper->crtc_info[i].mode_set; if (modeset->num_connectors == 0) { BUG_ON(modeset->fb); - BUG_ON(modeset->num_connectors); if (modeset->mode) drm_mode_destroy(dev, modeset->mode); modeset->mode = NULL; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index ed7bc68f7e87..91e1105f2800 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -515,10 +515,12 @@ ssize_t drm_read(struct file *filp, char __user *buffer, size_t total; ssize_t ret; - ret = wait_event_interruptible(file_priv->event_wait, - !list_empty(&file_priv->event_list)); - if (ret < 0) - return ret; + if ((filp->f_flags & O_NONBLOCK) == 0) { + ret = wait_event_interruptible(file_priv->event_wait, + !list_empty(&file_priv->event_list)); + if (ret < 0) + return ret; + } total = 0; while (drm_dequeue_event(file_priv, total, count, &e)) { @@ -532,7 +534,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer, e->destroy(e); } - return total; + return total ?: -EAGAIN; } EXPORT_SYMBOL(drm_read); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 5ef03c216a27..3e6b582f60dd 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1190,7 +1190,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off); * * This functions restores the vblank interrupt state captured with * drm_vblank_off() again. Note that calls to drm_vblank_on() and - * drm_vblank_off() can be unbalanced and so can also be unconditionaly called + * drm_vblank_off() can be unbalanced and so can also be unconditionally called * in driver load code to reflect the current hardware state of the crtc. * * This is the legacy version of drm_crtc_vblank_on(). @@ -1237,7 +1237,7 @@ EXPORT_SYMBOL(drm_vblank_on); * * This functions restores the vblank interrupt state captured with * drm_vblank_off() again. Note that calls to drm_vblank_on() and - * drm_vblank_off() can be unbalanced and so can also be unconditionaly called + * drm_vblank_off() can be unbalanced and so can also be unconditionally called * in driver load code to reflect the current hardware state of the crtc. * * This is the native kms version of drm_vblank_on(). diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index d1b7d2006529..6d8b941c8200 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -914,7 +914,7 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo); * * This function is a helper which can be used to validate modes against size * limitations of the DRM device/connector. If a mode is too big its status - * memeber is updated with the appropriate validation failure code. The list + * member is updated with the appropriate validation failure code. The list * itself is not changed. 
  */
 void drm_mode_validate_size(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 827ec1a3040b..d99c452b0563 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -27,10 +27,38 @@
 #include <drm/drmP.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_rect.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 
 #define SUBPIXEL_MASK 0xffff
 
+/**
+ * DOC: overview
+ *
+ * This helper library has two parts. The first part has support to implement
+ * primary plane support on top of the normal CRTC configuration interface.
+ * Since the legacy ->set_config interface ties the primary plane together with
+ * the CRTC state, this does not allow userspace to disable the primary plane
+ * itself. To avoid too much duplicated code, use
+ * drm_plane_helper_check_update(), which can be used to enforce the same
+ * restrictions that primary planes have historically had. The default primary
+ * plane only exposes XRGB8888 and ARGB8888 as valid pixel formats for the
+ * attached framebuffer.
+ *
+ * Drivers are highly recommended to implement proper support for primary
+ * planes, and newly merged drivers must not rely upon these transitional
+ * helpers.
+ *
+ * The second part also implements transitional helpers which allow drivers to
+ * gradually switch to the atomic helper infrastructure for plane updates. Once
+ * that switch is complete, drivers shouldn't use these any longer, instead using
+ * the proper legacy implementations for the update and disable plane hooks
+ * provided by the atomic helpers.
+ *
+ * Again drivers are strongly urged to switch to the new interfaces.
+ */
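A driver picking up these transitional helpers typically provides the per-plane helper vtable consumed by drm_plane_helper_commit() below and routes the legacy plane hooks through drm_plane_helper_update() and drm_plane_helper_disable(). A sketch, with the foo_ callbacks hypothetical (plane->helper_private must point at the helper vtable):

static const struct drm_plane_helper_funcs foo_primary_helper_funcs = {
	.prepare_fb	= foo_plane_prepare_fb,		/* pin the new fb */
	.cleanup_fb	= foo_plane_cleanup_fb,		/* unpin the old fb */
	.atomic_check	= foo_plane_atomic_check,
	.atomic_update	= foo_plane_atomic_update,	/* write plane registers */
};

static const struct drm_plane_funcs foo_primary_plane_funcs = {
	.update_plane	= drm_plane_helper_update,
	.disable_plane	= drm_plane_helper_disable,
	.destroy	= drm_plane_cleanup,
};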
+
 /*
  * This is the minimal list of formats that seem to be safe for modeset use
  * with all current DRM drivers. Most hardware can actually support more
@@ -369,3 +397,171 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 	return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
 }
 EXPORT_SYMBOL(drm_crtc_init);
+
+int drm_plane_helper_commit(struct drm_plane *plane,
+			    struct drm_plane_state *plane_state,
+			    struct drm_framebuffer *old_fb)
+{
+	struct drm_plane_helper_funcs *plane_funcs;
+	struct drm_crtc *crtc[2];
+	struct drm_crtc_helper_funcs *crtc_funcs[2];
+	int i, ret = 0;
+
+	plane_funcs = plane->helper_private;
+
+	/* Since this is a transitional helper we can't assume that plane->state
+	 * is always valid. Hence we need to use plane->crtc instead of
+	 * plane->state->crtc as the old crtc. */
+	crtc[0] = plane->crtc;
+	crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
+
+	for (i = 0; i < 2; i++)
+		crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
+
+	if (plane_funcs->atomic_check) {
+		ret = plane_funcs->atomic_check(plane, plane_state);
+		if (ret)
+			goto out;
+	}
+
+	if (plane_funcs->prepare_fb && plane_state->fb) {
+		ret = plane_funcs->prepare_fb(plane, plane_state->fb);
+		if (ret)
+			goto out;
+	}
+
+	/* Point of no return, commit sw state. */
+	swap(plane->state, plane_state);
+
+	for (i = 0; i < 2; i++) {
+		if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
+			crtc_funcs[i]->atomic_begin(crtc[i]);
+	}
+
+	plane_funcs->atomic_update(plane);
+
+	for (i = 0; i < 2; i++) {
+		if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
+			crtc_funcs[i]->atomic_flush(crtc[i]);
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (!crtc[i])
+			continue;
+
+		/* There's no other way to figure out whether the crtc is
+		 * running. */
+		ret = drm_crtc_vblank_get(crtc[i]);
+		if (ret == 0) {
+			drm_crtc_wait_one_vblank(crtc[i]);
+			drm_crtc_vblank_put(crtc[i]);
+		}
+
+		ret = 0;
+	}
+
+	if (plane_funcs->cleanup_fb && old_fb)
+		plane_funcs->cleanup_fb(plane, old_fb);
+out:
+	if (plane_state) {
+		if (plane->funcs->atomic_destroy_state)
+			plane->funcs->atomic_destroy_state(plane, plane_state);
+		else
+			drm_atomic_helper_plane_destroy_state(plane, plane_state);
+	}
+
+	return ret;
+}
+
+/**
+ * drm_plane_helper_update() - Helper for primary plane update
+ * @plane: plane object to update
+ * @crtc: owning CRTC of the plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+			    struct drm_framebuffer *fb,
+			    int crtc_x, int crtc_y,
+			    unsigned int crtc_w, unsigned int crtc_h,
+			    uint32_t src_x, uint32_t src_y,
+			    uint32_t src_w, uint32_t src_h)
+{
+	struct drm_plane_state *plane_state;
+
+	if (plane->funcs->atomic_duplicate_state)
+		plane_state = plane->funcs->atomic_duplicate_state(plane);
+	else if (plane->state)
+		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+	else
+		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	if (!plane_state)
+		return -ENOMEM;
+
+	plane_state->crtc = crtc;
+	drm_atomic_set_fb_for_plane(plane_state, fb);
+	plane_state->crtc_x = crtc_x;
+	plane_state->crtc_y = crtc_y;
+	plane_state->crtc_h = crtc_h;
+	plane_state->crtc_w = crtc_w;
+	plane_state->src_x = src_x;
+	plane_state->src_y = src_y;
+	plane_state->src_h = src_h;
+	plane_state->src_w = src_w;
+
+	return drm_plane_helper_commit(plane, plane_state, plane->fb);
+}
+EXPORT_SYMBOL(drm_plane_helper_update);
+
+/**
+ * drm_plane_helper_disable() - Helper for primary plane disable
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_disable(struct drm_plane *plane)
+{
+	struct drm_plane_state *plane_state;
+
+	/* crtc helpers love to call disable functions for already disabled
+	 * hardware. So cope with that.
*/ + if (!plane->crtc) + return 0; + + if (plane->funcs->atomic_duplicate_state) + plane_state = plane->funcs->atomic_duplicate_state(plane); + else if (plane->state) + plane_state = drm_atomic_helper_plane_duplicate_state(plane); + else + plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); + if (!plane_state) + return -ENOMEM; + + plane_state->crtc = NULL; + drm_atomic_set_fb_for_plane(plane_state, NULL); + + return drm_plane_helper_commit(plane, plane_state, plane->fb); +} +EXPORT_SYMBOL(drm_plane_helper_disable); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 78ca30808422..6872eca6555e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -328,7 +328,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { */ /** - * drm_gem_prime_export - helper library implemention of the export callback + * drm_gem_prime_export - helper library implementation of the export callback * @dev: drm_device to export from * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC @@ -483,7 +483,7 @@ out_unlock: EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); /** - * drm_gem_prime_import - helper library implemention of the import callback + * drm_gem_prime_import - helper library implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 6857e9ad6339..7483a47de8e4 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -118,7 +118,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect mode->status = MODE_UNVERIFIED; if (connector->force) { - if (connector->force == DRM_FORCE_ON) + if (connector->force == DRM_FORCE_ON || + connector->force == DRM_FORCE_ON_DIGITAL) connector->status = connector_status_connected; else connector->status = connector_status_disconnected; diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 9f158eab517a..0fafb8e2483a 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -37,6 +37,201 @@ #include "gma_display.h" #include <drm/drm_dp_helper.h> +/** + * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp + * aux algorithm + * @running: set by the algo indicating whether an i2c is ongoing or whether + * the i2c bus is quiescent + * @address: i2c target address for the currently ongoing transfer + * @aux_ch: driver callback to transfer a single byte of the i2c payload + */ +struct i2c_algo_dp_aux_data { + bool running; + u16 address; + int (*aux_ch) (struct i2c_adapter *adapter, + int mode, uint8_t write_byte, + uint8_t *read_byte); +}; + +/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ +static int +i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + ret = (*algo_data->aux_ch)(adapter, mode, + write_byte, read_byte); + return ret; +} + +/* + * I2C over AUX CH + */ + +/* + * Send the address. 
If the I2C link is running, this 'restarts'
+ * the connection with the new address, this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int mode = MODE_I2C_START;
+	int ret;
+
+	if (reading)
+		mode |= MODE_I2C_READ;
+	else
+		mode |= MODE_I2C_WRITE;
+	algo_data->address = address;
+	algo_data->running = true;
+	ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+	return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int mode = MODE_I2C_STOP;
+
+	if (reading)
+		mode |= MODE_I2C_READ;
+	else
+		mode |= MODE_I2C_WRITE;
+	if (algo_data->running) {
+		(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+		algo_data->running = false;
+	}
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	if (!algo_data->running)
+		return -EIO;
+
+	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+	return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	if (!algo_data->running)
+		return -EIO;
+
+	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+	return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+		     struct i2c_msg *msgs,
+		     int num)
+{
+	int ret = 0;
+	bool reading = false;
+	int m;
+	int b;
+
+	for (m = 0; m < num; m++) {
+		u16 len = msgs[m].len;
+		u8 *buf = msgs[m].buf;
+		reading = (msgs[m].flags & I2C_M_RD) != 0;
+		ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+		if (ret < 0)
+			break;
+		if (reading) {
+			for (b = 0; b < len; b++) {
+				ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+				if (ret < 0)
+					break;
+			}
+		} else {
+			for (b = 0; b < len; b++) {
+				ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+				if (ret < 0)
+					break;
+			}
+		}
+		if (ret < 0)
+			break;
+	}
+	if (ret >= 0)
+		ret = num;
+	i2c_algo_dp_aux_stop(adapter, reading);
+	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+	return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+	       I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+	.master_xfer = i2c_algo_dp_aux_xfer,
+	.functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+	(void) i2c_algo_dp_aux_address(adapter, 0, false);
+	(void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+	adapter->algo = &i2c_dp_aux_algo;
+	adapter->retries = 3;
+	i2c_dp_aux_reset_bus(adapter);
+	return 0;
+}
+
+/*
+ * FIXME: This is the old dp aux helper; gma500 is the last driver that needs to
+ * be ported over to the new helper code in
drm_dp_helper.c like i915 or radeon. + */ +static int __deprecated +i2c_dp_aux_add_bus(struct i2c_adapter *adapter) +{ + int error; + + error = i2c_dp_aux_prepare_bus(adapter); + if (error) + return error; + error = i2c_add_adapter(adapter); + return error; +} + #define _wait_for(COND, MS, W) ({ \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ int ret__ = 0; \ diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 87b50ba64ed4..b21a09451d1d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -21,6 +21,7 @@ #include <linux/i2c.h> #include <drm/drmP.h> +#include <drm/drm_plane_helper.h> #include "framebuffer.h" #include "psb_drv.h" #include "psb_intel_drv.h" diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 0be96fdb5e28..58529cea575d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1631,57 +1631,8 @@ static int psb_intel_sdvo_get_modes(struct drm_connector *connector) return !list_empty(&connector->probed_modes); } -static void -psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector) -{ - struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector); - struct drm_device *dev = connector->dev; - - if (psb_intel_sdvo_connector->left) - drm_property_destroy(dev, psb_intel_sdvo_connector->left); - if (psb_intel_sdvo_connector->right) - drm_property_destroy(dev, psb_intel_sdvo_connector->right); - if (psb_intel_sdvo_connector->top) - drm_property_destroy(dev, psb_intel_sdvo_connector->top); - if (psb_intel_sdvo_connector->bottom) - drm_property_destroy(dev, psb_intel_sdvo_connector->bottom); - if (psb_intel_sdvo_connector->hpos) - drm_property_destroy(dev, psb_intel_sdvo_connector->hpos); - if (psb_intel_sdvo_connector->vpos) - drm_property_destroy(dev, psb_intel_sdvo_connector->vpos); - if (psb_intel_sdvo_connector->saturation) - drm_property_destroy(dev, psb_intel_sdvo_connector->saturation); - if (psb_intel_sdvo_connector->contrast) - drm_property_destroy(dev, psb_intel_sdvo_connector->contrast); - if (psb_intel_sdvo_connector->hue) - drm_property_destroy(dev, psb_intel_sdvo_connector->hue); - if (psb_intel_sdvo_connector->sharpness) - drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness); - if (psb_intel_sdvo_connector->flicker_filter) - drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter); - if (psb_intel_sdvo_connector->flicker_filter_2d) - drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d); - if (psb_intel_sdvo_connector->flicker_filter_adaptive) - drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive); - if (psb_intel_sdvo_connector->tv_luma_filter) - drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter); - if (psb_intel_sdvo_connector->tv_chroma_filter) - drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter); - if (psb_intel_sdvo_connector->dot_crawl) - drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl); - if (psb_intel_sdvo_connector->brightness) - drm_property_destroy(dev, psb_intel_sdvo_connector->brightness); -} - static void psb_intel_sdvo_destroy(struct drm_connector *connector) { - struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector); - - if (psb_intel_sdvo_connector->tv_format) - drm_property_destroy(connector->dev, - 
psb_intel_sdvo_connector->tv_format); - - psb_intel_sdvo_destroy_enhance_property(connector); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c1dd485aeb6c..75fd7de9bf4b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -11,7 +11,9 @@ i915-y := i915_drv.o \ i915_params.o \ i915_suspend.o \ i915_sysfs.o \ - intel_pm.o + intel_pm.o \ + intel_runtime_pm.o + i915-$(CONFIG_COMPAT) += i915_ioc32.o i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o @@ -43,6 +45,8 @@ i915-y += intel_renderstate_gen6.o \ # modesetting core code i915-y += intel_bios.o \ intel_display.o \ + intel_fifo_underrun.o \ + intel_frontbuffer.o \ intel_modes.o \ intel_overlay.o \ intel_sideband.o \ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 593b657d3e59..d074288d5ae9 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -73,7 +73,7 @@ * those commands required by the parser. This generally works because command * opcode ranges have standard command length encodings. So for commands that * the parser does not need to check, it can easily skip them. This is - * implementated via a per-ring length decoding vfunc. + * implemented via a per-ring length decoding vfunc. * * Unfortunately, there are a number of commands that do not follow the standard * length encoding for their opcode range, primarily amongst the MI_* commands. @@ -838,7 +838,7 @@ finish: * @ring: the ring in question * * Only certain platforms require software batch buffer command parsing, and - * only when enabled via module paramter. + * only when enabled via module parameter. * * Return: true if the ring requires software command parsing */ @@ -847,12 +847,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring) if (!ring->needs_cmd_parser) return false; - /* - * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT - * disabled. That will cause all of the parser's PPGTT checks to - * fail. For now, disable parsing when PPGTT is off. - */ - if (USES_PPGTT(ring->dev)) + if (!USES_PPGTT(ring->dev)) return false; return (i915.enable_cmd_parser == 1); @@ -888,8 +883,10 @@ static bool check_cmd(const struct intel_engine_cs *ring, * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands. 
*/ if (reg_addr == OACONTROL) { - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) + if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { + DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); return false; + } if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) *oacontrol_set = (cmd[2] != 0); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 063b44817e08..e60d5c2f4a35 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; struct intel_crtc *crtc; int ret; @@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) const char plane = plane_name(crtc->plane); struct intel_unpin_work *work; - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irq(&dev->event_lock); work = crtc->unpin_work; if (work == NULL) { seq_printf(m, "No flip due on pipe %c (plane %c)\n", @@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset); } } - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); } mutex_unlock(&dev->struct_mutex); @@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } for_each_pipe(dev_priv, pipe) { - if (!intel_display_power_enabled(dev_priv, + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); @@ -1849,6 +1848,8 @@ static int i915_execlists(struct seq_file *m, void *data) if (ret) return ret; + intel_runtime_pm_get(dev_priv); + for_each_ring(ring, dev_priv, ring_id) { struct intel_ctx_submit_request *head_req = NULL; int count = 0; @@ -1900,6 +1901,7 @@ static int i915_execlists(struct seq_file *m, void *data) seq_putc(m, '\n'); } + intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); return 0; @@ -1986,7 +1988,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) I915_READ(MAD_DIMM_C2)); seq_printf(m, "TILECTL = 0x%08x\n", I915_READ(TILECTL)); - if (IS_GEN8(dev)) + if (INTEL_INFO(dev)->gen >= 8) seq_printf(m, "GAMTARBMODE = 0x%08x\n", I915_READ(GAMTARBMODE)); else @@ -2656,18 +2658,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs); - for (i = 0; i < dev_priv->num_wa_regs; ++i) { - u32 addr, mask; - - addr = dev_priv->intel_wa_regs[i].addr; - mask = dev_priv->intel_wa_regs[i].mask; - dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask; - if (dev_priv->intel_wa_regs[i].addr) - seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", - dev_priv->intel_wa_regs[i].addr, - dev_priv->intel_wa_regs[i].value, - dev_priv->intel_wa_regs[i].mask); + seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); + for (i = 0; i < dev_priv->workarounds.count; ++i) { + u32 addr, mask, value, read; + bool ok; + + addr = dev_priv->workarounds.reg[i].addr; + mask = dev_priv->workarounds.reg[i].mask; + value = dev_priv->workarounds.reg[i].value; + read = I915_READ(addr); + ok = (value & mask) == (read & mask); + seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", + addr, value, mask, read, ok ? 
"OK" : "FAIL"); } intel_runtime_pm_put(dev_priv); @@ -3256,6 +3258,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; + struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, + pipe)); u32 val = 0; /* shut up gcc */ int ret; @@ -3291,6 +3295,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, if (!pipe_crc->entries) return -ENOMEM; + /* + * When IPS gets enabled, the pipe CRC changes. Since IPS gets + * enabled and disabled dynamically based on package C states, + * user space can't make reliable use of the CRCs, so let's just + * completely disable it. + */ + hsw_disable_ips(crtc); + spin_lock_irq(&pipe_crc->lock); pipe_crc->head = 0; pipe_crc->tail = 0; @@ -3329,6 +3341,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, vlv_undo_pipe_scramble_reset(dev, pipe); else if (IS_HASWELL(dev) && pipe == PIPE_A) hsw_undo_trans_edp_pipe_A_crc_wa(dev); + + hsw_enable_ips(crtc); } return 0; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1403b01e8216..9a7353302b3f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1275,12 +1275,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); - i915_resume(dev); + i915_resume_legacy(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { pr_err("switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend(dev, pmm); + i915_suspend_legacy(dev, pmm); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -1338,14 +1338,7 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_power_domains_init_hw(dev_priv); - /* - * We enable some interrupt sources in our postinstall hooks, so mark - * interrupts as enabled _before_ actually enabling them to avoid - * special cases in our ordering checks. - */ - dev_priv->pm._irqs_disabled = false; - - ret = drm_irq_install(dev, dev->pdev->irq); + ret = intel_irq_install(dev_priv); if (ret) goto cleanup_gem_stolen; @@ -1370,7 +1363,7 @@ static int i915_load_modeset_init(struct drm_device *dev) goto cleanup_gem; /* Only enable hotplug handling once the fbdev is fully set up. 
*/ - intel_hpd_init(dev); + intel_hpd_init(dev_priv); /* * Some ports require correctly set-up hpd registers for detection to @@ -1534,7 +1527,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) info = (struct intel_device_info *)&dev_priv->info; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9) for_each_pipe(dev_priv, pipe) info->num_sprites[pipe] = 2; else @@ -1614,7 +1607,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); - spin_lock_init(&dev_priv->backlight_lock); + mutex_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); spin_lock_init(&dev_priv->mm.object_stat_lock); spin_lock_init(&dev_priv->mmio_flip_lock); @@ -1740,7 +1733,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_freewq; } - intel_irq_init(dev); + intel_irq_init(dev_priv); intel_uncore_sanitize(dev); /* Try to make sure MCHBAR is enabled before poking at it */ @@ -1798,12 +1791,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_GEN5(dev)) intel_gpu_ips_init(dev_priv); - intel_init_runtime_pm(dev_priv); + intel_runtime_pm_enable(dev_priv); return 0; out_power_well: - intel_power_domains_remove(dev_priv); + intel_power_domains_fini(dev_priv); drm_vblank_cleanup(dev); out_gem_unload: WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); @@ -1846,16 +1839,10 @@ int i915_driver_unload(struct drm_device *dev) return ret; } - intel_fini_runtime_pm(dev_priv); + intel_power_domains_fini(dev_priv); intel_gpu_ips_teardown(); - /* The i915.ko module is still not prepared to be loaded when - * the power well is not enabled, so just enable it in case - * we're going to unload/reload. */ - intel_display_set_init_power(dev_priv, true); - intel_power_domains_remove(dev_priv); - i915_teardown_sysfs(dev); WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); @@ -1866,8 +1853,12 @@ int i915_driver_unload(struct drm_device *dev) acpi_video_unregister(); - if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) intel_fbdev_fini(dev); + + drm_vblank_cleanup(dev); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_modeset_cleanup(dev); /* @@ -1908,8 +1899,6 @@ int i915_driver_unload(struct drm_device *dev) i915_free_hws(dev); } - drm_vblank_cleanup(dev); - intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 055d5e7fbf12..035ec94ca3c7 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = { CURSOR_OFFSETS, }; +static const struct intel_device_info intel_skylake_info = { + .is_preliminary = 1, + .is_skylake = 1, + .gen = 9, .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .has_llc = 1, + .has_ddi = 1, + .has_fbc = 1, + GEN_DEFAULT_PIPEOFFSETS, + IVB_CURSOR_OFFSETS, +}; + /* * Make sure any device matches here are from most specific to most * general. 
For example, since the Quanta match is based on the subsystem @@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = { INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ - INTEL_CHV_IDS(&intel_cherryview_info) + INTEL_CHV_IDS(&intel_cherryview_info), \ + INTEL_SKL_IDS(&intel_skylake_info) static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_PCI_IDS, @@ -449,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev) dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint PCH\n"); WARN_ON(!IS_HASWELL(dev)); - WARN_ON(IS_ULT(dev)); + WARN_ON(IS_HSW_ULT(dev)); } else if (IS_BROADWELL(dev)) { dev_priv->pch_type = PCH_LPT; dev_priv->pch_id = @@ -460,7 +474,15 @@ void intel_detect_pch(struct drm_device *dev) dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev)); - WARN_ON(!IS_ULT(dev)); + WARN_ON(!IS_HSW_ULT(dev)); + } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_SPT; + DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); + WARN_ON(!IS_SKYLAKE(dev)); + } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_SPT; + DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); + WARN_ON(!IS_SKYLAKE(dev)); } else continue; @@ -532,7 +554,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv); static int intel_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume); -static int i915_drm_freeze(struct drm_device *dev) +static int i915_drm_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; @@ -575,14 +597,14 @@ static int i915_drm_freeze(struct drm_device *dev) flush_delayed_work(&dev_priv->rps.delayed_resume_work); - intel_runtime_pm_disable_interrupts(dev); + intel_runtime_pm_disable_interrupts(dev_priv); intel_hpd_cancel_work(dev_priv); intel_suspend_encoders(dev_priv); intel_suspend_gt_powersave(dev); - intel_modeset_suspend_hw(dev); + intel_suspend_hw(dev); } i915_gem_suspend_gtt_mappings(dev); @@ -608,7 +630,26 @@ static int i915_drm_freeze(struct drm_device *dev) return 0; } -int i915_suspend(struct drm_device *dev, pm_message_t state) +static int i915_drm_suspend_late(struct drm_device *drm_dev) +{ + struct drm_i915_private *dev_priv = drm_dev->dev_private; + int ret; + + ret = intel_suspend_complete(dev_priv); + + if (ret) { + DRM_ERROR("Suspend complete failed: %d\n", ret); + + return ret; + } + + pci_disable_device(drm_dev->pdev); + pci_set_power_state(drm_dev->pdev, PCI_D3hot); + + return 0; +} + +int i915_suspend_legacy(struct drm_device *dev, pm_message_t state) { int error; @@ -618,48 +659,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) return -ENODEV; } - if (state.event == PM_EVENT_PRETHAW) - return 0; - + if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && + state.event != PM_EVENT_FREEZE)) + return -EINVAL; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - error = i915_drm_freeze(dev); + error = i915_drm_suspend(dev); if (error) return error; - if (state.event == PM_EVENT_SUSPEND) { - /* Shut down the device */ - pci_disable_device(dev->pdev); - pci_set_power_state(dev->pdev, PCI_D3hot); - } - - return 0; -} - -static int i915_drm_thaw_early(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = intel_resume_prepare(dev_priv, false); - if (ret) - DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", 
ret); - - intel_uncore_early_sanitize(dev, true); - intel_uncore_sanitize(dev); - intel_power_domains_init_hw(dev_priv); - - return ret; + return i915_drm_suspend_late(dev); } -static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) +static int i915_drm_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (drm_core_check_feature(dev, DRIVER_MODESET) && - restore_gtt_mappings) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); i915_gem_restore_gtt_mappings(dev); mutex_unlock(&dev->struct_mutex); @@ -680,16 +698,16 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) } mutex_unlock(&dev->struct_mutex); - intel_runtime_pm_restore_interrupts(dev); + /* We need working interrupts for modeset enabling ... */ + intel_runtime_pm_enable_interrupts(dev_priv); intel_modeset_init_hw(dev); { - unsigned long irqflags; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } intel_dp_mst_resume(dev); @@ -703,7 +721,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) * bother with the tiny race here where we might lose hotplug * notifications. */ - intel_hpd_init(dev); + intel_hpd_init(dev_priv); /* Config may have changed between suspend and resume */ drm_helper_hpd_irq_event(dev); } @@ -718,21 +736,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) intel_opregion_notify_adapter(dev, PCI_D0); - return 0; -} - -static int i915_drm_thaw(struct drm_device *dev) -{ - if (drm_core_check_feature(dev, DRIVER_MODESET)) - i915_check_and_clear_faults(dev); + drm_kms_helper_poll_enable(dev); - return __i915_drm_thaw(dev, true); + return 0; } -static int i915_resume_early(struct drm_device *dev) +static int i915_drm_resume_early(struct drm_device *dev) { - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) - return 0; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; /* * We have a resume ordering issue with the snd-hda driver also @@ -748,33 +760,29 @@ static int i915_resume_early(struct drm_device *dev) pci_set_master(dev->pdev); - return i915_drm_thaw_early(dev); + ret = intel_resume_prepare(dev_priv, false); + if (ret) + DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret); + + intel_uncore_early_sanitize(dev, true); + intel_uncore_sanitize(dev); + intel_power_domains_init_hw(dev_priv); + + return ret; } -int i915_resume(struct drm_device *dev) +int i915_resume_legacy(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; - /* - * Platforms with opregion should have sane BIOS, older ones (gen3 and - * earlier) need to restore the GTT mappings since the BIOS might clear - * all our scratch PTEs. 
- */ - ret = __i915_drm_thaw(dev, !dev_priv->opregion.header); + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + ret = i915_drm_resume_early(dev); if (ret) return ret; - drm_kms_helper_poll_enable(dev); - return 0; -} - -static int i915_resume_legacy(struct drm_device *dev) -{ - i915_resume_early(dev); - i915_resume(dev); - - return 0; + return i915_drm_resume(dev); } /** @@ -820,6 +828,9 @@ int i915_reset(struct drm_device *dev) } } + if (i915_stop_ring_allow_warn(dev_priv)) + pr_notice("drm/i915: Resetting chip after gpu hang\n"); + if (ret) { DRM_ERROR("Failed to reset chip: %i\n", ret); mutex_unlock(&dev->struct_mutex); @@ -923,15 +934,13 @@ static int i915_pm_suspend(struct device *dev) if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - return i915_drm_freeze(drm_dev); + return i915_drm_suspend(drm_dev); } static int i915_pm_suspend_late(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct drm_i915_private *dev_priv = drm_dev->dev_private; - int ret; /* * We have a suspend ordering issue with the snd-hda driver also @@ -945,16 +954,7 @@ static int i915_pm_suspend_late(struct device *dev) if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - ret = intel_suspend_complete(dev_priv); - - if (ret) - DRM_ERROR("Suspend complete failed: %d\n", ret); - else { - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); - } - - return ret; + return i915_drm_suspend_late(drm_dev); } static int i915_pm_resume_early(struct device *dev) @@ -962,52 +962,21 @@ static int i915_pm_resume_early(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - return i915_resume_early(drm_dev); -} - -static int i915_pm_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - - return i915_resume(drm_dev); -} - -static int i915_pm_freeze(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - - if (!drm_dev || !drm_dev->dev_private) { - dev_err(dev, "DRM not initialized, aborting suspend.\n"); - return -ENODEV; - } - - return i915_drm_freeze(drm_dev); -} - -static int i915_pm_thaw_early(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; - return i915_drm_thaw_early(drm_dev); + return i915_drm_resume_early(drm_dev); } -static int i915_pm_thaw(struct device *dev) +static int i915_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - return i915_drm_thaw(drm_dev); -} - -static int i915_pm_poweroff(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; - return i915_drm_freeze(drm_dev); + return i915_drm_resume(drm_dev); } static int hsw_suspend_complete(struct drm_i915_private *dev_priv) @@ -1446,12 +1415,12 @@ static int intel_runtime_suspend(struct device *device) * intel_mark_idle(). 
*/ cancel_work_sync(&dev_priv->rps.work); - intel_runtime_pm_disable_interrupts(dev); + intel_runtime_pm_disable_interrupts(dev_priv); ret = intel_suspend_complete(dev_priv); if (ret) { DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); - intel_runtime_pm_restore_interrupts(dev); + intel_runtime_pm_enable_interrupts(dev_priv); return ret; } @@ -1511,7 +1480,7 @@ static int intel_runtime_resume(struct device *device) i915_gem_init_swizzling(dev); gen6_update_ring_freq(dev); - intel_runtime_pm_restore_interrupts(dev); + intel_runtime_pm_enable_interrupts(dev_priv); intel_reset_gt_powersave(dev); if (ret) @@ -1565,16 +1534,40 @@ static int intel_resume_prepare(struct drm_i915_private *dev_priv, } static const struct dev_pm_ops i915_pm_ops = { + /* + * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, + * PMSG_RESUME] + */ .suspend = i915_pm_suspend, .suspend_late = i915_pm_suspend_late, .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, - .freeze = i915_pm_freeze, - .thaw_early = i915_pm_thaw_early, - .thaw = i915_pm_thaw, - .poweroff = i915_pm_poweroff, + + /* + * S4 event handlers + * @freeze, @freeze_late : called (1) before creating the + * hibernation image [PMSG_FREEZE] and + * (2) after rebooting, before restoring + * the image [PMSG_QUIESCE] + * @thaw, @thaw_early : called (1) after creating the hibernation + * image, before writing it [PMSG_THAW] + * and (2) after failing to create or + * restore the image [PMSG_RECOVER] + * @poweroff, @poweroff_late: called after writing the hibernation + * image, before rebooting [PMSG_HIBERNATE] + * @restore, @restore_early : called after rebooting and restoring the + * hibernation image [PMSG_RESTORE] + */ + .freeze = i915_pm_suspend, + .freeze_late = i915_pm_suspend_late, + .thaw_early = i915_pm_resume_early, + .thaw = i915_pm_resume, + .poweroff = i915_pm_suspend, + .poweroff_late = i915_pm_suspend_late, .restore_early = i915_pm_resume_early, .restore = i915_pm_resume, + + /* S0ix (via runtime suspend) event handlers */ .runtime_suspend = intel_runtime_suspend, .runtime_resume = intel_runtime_resume, }; @@ -1616,7 +1609,7 @@ static struct drm_driver driver = { .set_busid = drm_pci_set_busid, /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ - .suspend = i915_suspend, + .suspend = i915_suspend_legacy, .resume = i915_resume_legacy, .device_is_agp = i915_driver_device_is_agp, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 16a6f6d187a1..583c97debeb7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -55,7 +55,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20140905" +#define DRIVER_DATE "20141024" enum pipe { INVALID_PIPE = -1, @@ -76,6 +76,14 @@ enum transcoder { }; #define transcoder_name(t) ((t) + 'A') +/* + * This is the maximum (across all platforms) number of planes (primary + + * sprites) that can be active at the same time on one pipe. + * + * This value doesn't count the cursor plane. + */ +#define I915_MAX_PLANES 3 + enum plane { PLANE_A = 0, PLANE_B, @@ -452,7 +460,7 @@ struct drm_i915_display_funcs { * Returns true on success, false on failure. 
*/ bool (*find_dpll)(const struct intel_limit *limit, - struct drm_crtc *crtc, + struct intel_crtc *crtc, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock); @@ -468,7 +476,7 @@ struct drm_i915_display_funcs { struct intel_crtc_config *); void (*get_plane_config)(struct intel_crtc *, struct intel_plane_config *); - int (*crtc_mode_set)(struct drm_crtc *crtc, + int (*crtc_mode_set)(struct intel_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); void (*crtc_enable)(struct drm_crtc *crtc); @@ -551,6 +559,7 @@ struct intel_uncore { func(is_ivybridge) sep \ func(is_valleyview) sep \ func(is_haswell) sep \ + func(is_skylake) sep \ func(is_preliminary) sep \ func(has_fbc) sep \ func(has_pipe_cxsr) sep \ @@ -663,6 +672,18 @@ struct i915_fbc { bool false_color; + /* Tracks whether the HW is actually enabled, not whether the feature is + * possible. */ + bool enabled; + + /* On gen8 some rings cannot perform the fbc clean operation, so for now + * we do it in SW with mmio. + * This variable works in the opposite direction of ring->fbc_dirty, + * telling the frontbuffer tracking code to perform the cache clean + * on the SW side. + */ + bool need_sw_cache_clean; + struct intel_fbc_work { struct delayed_work work; struct drm_crtc *crtc; @@ -704,6 +725,7 @@ enum intel_pch { PCH_IBX, /* Ibexpeak PCH */ PCH_CPT, /* Cougarpoint PCH */ PCH_LPT, /* Lynxpoint PCH */ + PCH_SPT, /* Sunrisepoint PCH */ PCH_NOP, }; @@ -1369,7 +1391,7 @@ struct ilk_wm_values { * * Our driver uses the autosuspend delay feature, which means we'll only really * suspend if we stay with zero refcount for a certain amount of time. The - * default value is currently very conservative (see intel_init_runtime_pm), but + * default value is currently very conservative (see intel_runtime_pm_enable), but * it can be changed with the standard runtime PM files from sysfs. * * The irqs_disabled variable becomes true exactly after we disable the IRQs and @@ -1382,7 +1404,7 @@ struct ilk_wm_values { */ struct i915_runtime_pm { bool suspended; - bool _irqs_disabled; + bool irqs_enabled; }; enum intel_pipe_crc_source { @@ -1426,6 +1448,20 @@ struct i915_frontbuffer_tracking { unsigned flip_bits; }; +struct i915_wa_reg { + u32 addr; + u32 value; + /* bitmask representing WA bits */ + u32 mask; +}; + +#define I915_MAX_WA_REGS 16 + +struct i915_workarounds { + struct i915_wa_reg reg[I915_MAX_WA_REGS]; + u32 count; +}; + struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; @@ -1505,11 +1541,13 @@ struct drm_i915_private { struct intel_opregion opregion; struct intel_vbt_data vbt; + bool preserve_bios_swizzle; + /* overlay */ struct intel_overlay *overlay; /* backlight registers and fields in struct intel_panel */ - spinlock_t backlight_lock; + struct mutex backlight_lock; /* LVDS info */ bool no_aux_handshake; @@ -1568,19 +1606,7 @@ struct drm_i915_private { struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; - /* - * workarounds are currently applied at different places and - * changes are being done to consolidate them so exact count is - * not clear at this point, use a max value for now. 
- */ -#define I915_MAX_WA_REGS 16 - struct { - u32 addr; - u32 value; - /* bitmask representing WA bits */ - u32 mask; - } intel_wa_regs[I915_MAX_WA_REGS]; - u32 num_wa_regs; + struct i915_workarounds workarounds; /* Reclocking support */ bool render_reclock_avail; @@ -2073,6 +2099,7 @@ struct drm_i915_cmd_table { #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) +#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) @@ -2080,9 +2107,10 @@ struct drm_i915_cmd_table { ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ (INTEL_DEVID(dev) & 0xf) == 0x6 || \ (INTEL_DEVID(dev) & 0xf) == 0xe)) +#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ + (INTEL_DEVID(dev) & 0x00F0) == 0x0020) #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) -#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0x00F0) == 0x0020) /* ULX machines are also considered ULT. */ @@ -2103,6 +2131,7 @@ struct drm_i915_cmd_table { #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) +#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) #define RENDER_RING (1<<RCS) #define BSD_RING (1<<VCS) @@ -2115,13 +2144,11 @@ struct drm_i915_cmd_table { #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ - to_i915(dev)->ellc_size) + __I915__(dev)->ellc_size) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) -#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) #define USES_PPGTT(dev) (i915.enable_ppgtt) #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) @@ -2154,13 +2181,15 @@ struct drm_i915_cmd_table { #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) +#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) +#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) +#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 @@ -2168,8 +2197,11 @@ struct drm_i915_cmd_table { #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 +#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 +#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 -#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) +#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) +#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) 
(INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) @@ -2189,8 +2221,8 @@ struct drm_i915_cmd_table { extern const struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; -extern int i915_suspend(struct drm_device *dev, pm_message_t state); -extern int i915_resume(struct drm_device *dev); +extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state); +extern int i915_resume_legacy(struct drm_device *dev); extern int i915_master_create(struct drm_device *dev, struct drm_master *master); extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); @@ -2262,8 +2294,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged, void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir, int new_delay); -extern void intel_irq_init(struct drm_device *dev); -extern void intel_hpd_init(struct drm_device *dev); +extern void intel_irq_init(struct drm_i915_private *dev_priv); +extern void intel_hpd_init(struct drm_i915_private *dev_priv); +int intel_irq_install(struct drm_i915_private *dev_priv); +void intel_irq_uninstall(struct drm_i915_private *dev_priv); extern void intel_uncore_sanitize(struct drm_device *dev); extern void intel_uncore_early_sanitize(struct drm_device *dev, @@ -2283,6 +2317,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); +void +ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); +void +ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); +void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask); +#define ibx_enable_display_interrupt(dev_priv, bits) \ + ibx_display_interrupt_update((dev_priv), (bits), (bits)) +#define ibx_disable_display_interrupt(dev_priv, bits) \ + ibx_display_interrupt_update((dev_priv), (bits), 0) /* i915_gem.c */ int i915_gem_init_ioctl(struct drm_device *dev, void *data, @@ -2793,7 +2838,6 @@ static inline void intel_unregister_dsm_handler(void) { return; } /* modesetting */ extern void intel_modeset_init_hw(struct drm_device *dev); -extern void intel_modeset_suspend_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); @@ -2804,7 +2848,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev, extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev); extern bool intel_fbc_enabled(struct drm_device *dev); -extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value); +extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 28f91df2604d..827edb589883 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1466,6 +1466,16 @@ unlock: * * While the mapping holds a reference on the contents of the object, it doesn't * imply a ref on the object itself. 
+ * + * IMPORTANT: + * + * DRM driver writers who look at this function as an example for how to do GEM + * mmap support, please don't implement mmap support like this. The modern way + * to implement DRM mmap support is with an mmap offset ioctl (like + * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. + * That way debug tooling like valgrind will understand what's going on; hiding + * the mmap call in a driver private ioctl will break that. The i915 driver only + * does cpu mmaps this way because we didn't know better. */ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, @@ -1945,7 +1955,14 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, long target, unsigned flags) { - const bool purgeable_only = flags & I915_SHRINK_PURGEABLE; + const struct { + struct list_head *list; + unsigned int bit; + } phases[] = { + { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND }, + { &dev_priv->mm.bound_list, I915_SHRINK_BOUND }, + { NULL, 0 }, + }, *phase; unsigned long count = 0; /* @@ -1967,48 +1984,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, * dev->struct_mutex and so we won't ever be able to observe an * object on the bound_list with a reference count equal to 0. */ - if (flags & I915_SHRINK_UNBOUND) { + for (phase = phases; phase->list; phase++) { struct list_head still_in_list; - INIT_LIST_HEAD(&still_in_list); - while (count < target && !list_empty(&dev_priv->mm.unbound_list)) { - struct drm_i915_gem_object *obj; - - obj = list_first_entry(&dev_priv->mm.unbound_list, - typeof(*obj), global_list); - list_move_tail(&obj->global_list, &still_in_list); - - if (!i915_gem_object_is_purgeable(obj) && purgeable_only) - continue; - - drm_gem_object_reference(&obj->base); - - if (i915_gem_object_put_pages(obj) == 0) - count += obj->base.size >> PAGE_SHIFT; - - drm_gem_object_unreference(&obj->base); - } - list_splice(&still_in_list, &dev_priv->mm.unbound_list); - } - - if (flags & I915_SHRINK_BOUND) { - struct list_head still_in_list; + if ((flags & phase->bit) == 0) + continue; INIT_LIST_HEAD(&still_in_list); - while (count < target && !list_empty(&dev_priv->mm.bound_list)) { + while (count < target && !list_empty(phase->list)) { struct drm_i915_gem_object *obj; struct i915_vma *vma, *v; - obj = list_first_entry(&dev_priv->mm.bound_list, + obj = list_first_entry(phase->list, typeof(*obj), global_list); list_move_tail(&obj->global_list, &still_in_list); - if (!i915_gem_object_is_purgeable(obj) && purgeable_only) + if (flags & I915_SHRINK_PURGEABLE && + !i915_gem_object_is_purgeable(obj)) continue; drm_gem_object_reference(&obj->base); - list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) + /* For the unbound phase, this should be a no-op! 
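+ * (objects on the unbound list have nothing bound into an address space, + * so i915_vma_unbind() has no work to do.)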
*/ list_for_each_entry_safe(vma, v, + &obj->vma_list, vma_link) if (i915_vma_unbind(vma)) break; @@ -2017,7 +2016,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, drm_gem_object_unreference(&obj->base); } - list_splice(&still_in_list, &dev_priv->mm.bound_list); + list_splice(&still_in_list, phase->list); } return count; @@ -2811,6 +2810,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) u32 seqno = 0; int ret = 0; + if (args->flags != 0) + return -EINVAL; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -3166,6 +3168,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, obj->stride, obj->tiling_mode); switch (INTEL_INFO(dev)->gen) { + case 9: case 8: case 7: case 6: @@ -3384,46 +3387,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, return true; } -static void i915_gem_verify_gtt(struct drm_device *dev) -{ -#if WATCH_GTT - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; - int err = 0; - - list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) { - if (obj->gtt_space == NULL) { - printk(KERN_ERR "object found on GTT list with no space reserved\n"); - err++; - continue; - } - - if (obj->cache_level != obj->gtt_space->color) { - printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", - i915_gem_obj_ggtt_offset(obj), - i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), - obj->cache_level, - obj->gtt_space->color); - err++; - continue; - } - - if (!i915_gem_valid_gtt_space(dev, - obj->gtt_space, - obj->cache_level)) { - printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", - i915_gem_obj_ggtt_offset(obj), - i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), - obj->cache_level); - err++; - continue; - } - } - - WARN_ON(err); -#endif -} - /** * Finds free space in the GTT aperture and binds the object there. */ @@ -3532,7 +3495,6 @@ search_free: vma->bind_vma(vma, obj->cache_level, flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0); - i915_gem_verify_gtt(dev); return vma; err_remove_node: @@ -3769,7 +3731,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, old_write_domain); } - i915_gem_verify_gtt(dev); return 0; } @@ -5119,6 +5080,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) return ret; } +/** + * i915_gem_track_fb - update frontbuffer tracking + * @old: current GEM buffer for the frontbuffer slots + * @new: new GEM buffer for the frontbuffer slots + * @frontbuffer_bits: bitmask of frontbuffer slots + * + * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them + * from @old and setting them in @new. Both @old and @new can be NULL. 
+ */ void i915_gem_track_fb(struct drm_i915_gem_object *old, struct drm_i915_gem_object *new, unsigned frontbuffer_bits) @@ -5302,7 +5272,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) struct drm_device *dev = dev_priv->dev; struct drm_i915_gem_object *obj; unsigned long timeout = msecs_to_jiffies(5000) + 1; - unsigned long pinned, bound, unbound, freed; + unsigned long pinned, bound, unbound, freed_pages; bool was_interruptible; bool unlock; @@ -5319,7 +5289,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = false; - freed = i915_gem_shrink_all(dev_priv); + freed_pages = i915_gem_shrink_all(dev_priv); dev_priv->mm.interruptible = was_interruptible; @@ -5350,14 +5320,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) if (unlock) mutex_unlock(&dev->struct_mutex); - pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", - freed, pinned); + if (freed_pages || unbound || bound) + pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", + freed_pages << PAGE_SHIFT, pinned); if (unbound || bound) pr_err("%lu and %lu bytes still available in the " "bound and unbound GPU page lists.\n", bound, unbound); - *(unsigned long *)ptr += freed; + *(unsigned long *)ptr += freed_pages; return NOTIFY_DONE; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b672b843fd5e..ae82ef5e7df4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -35,13 +35,21 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) { - if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) + bool has_aliasing_ppgtt; + bool has_full_ppgtt; + + has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; + has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; + if (IS_GEN8(dev)) + has_full_ppgtt = false; /* XXX why? */ + + if (enable_ppgtt == 0 || !has_aliasing_ppgtt) return 0; if (enable_ppgtt == 1) return 1; - if (enable_ppgtt == 2 && HAS_PPGTT(dev)) + if (enable_ppgtt == 2 && has_full_ppgtt) return 2; #ifdef CONFIG_INTEL_IOMMU @@ -59,7 +67,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) return 0; } - return HAS_ALIASING_PPGTT(dev) ? 1 : 0; + return has_aliasing_ppgtt ? 
1 : 0; } @@ -1092,7 +1100,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) if (INTEL_INFO(dev)->gen < 8) return gen6_ppgtt_init(ppgtt); - else if (IS_GEN8(dev)) + else if (IS_GEN8(dev) || IS_GEN9(dev)) return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); else BUG(); @@ -1764,7 +1772,6 @@ static int setup_scratch_page(struct drm_device *dev) page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (page == NULL) return -ENOMEM; - get_page(page); set_pages_uc(page, 1); #ifdef CONFIG_INTEL_IOMMU @@ -1789,7 +1796,6 @@ static void teardown_scratch_page(struct drm_device *dev) set_pages_wb(page, 1); pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - put_page(page); __free_page(page); } @@ -1859,6 +1865,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl) return (gmch_ctrl - 0x17 + 9) << 22; } +static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) +{ + gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; + gen9_gmch_ctl &= BDW_GMCH_GMS_MASK; + + if (gen9_gmch_ctl < 0xf0) + return gen9_gmch_ctl << 25; /* 32 MB units */ + else + /* 4MB increments starting at 0xf0 for 4MB */ + return (gen9_gmch_ctl - 0xf0 + 1) << 22; +} + static int ggtt_probe_common(struct drm_device *dev, size_t gtt_size) { @@ -1955,7 +1973,10 @@ static int gen8_gmch_probe(struct drm_device *dev, pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - if (IS_CHERRYVIEW(dev)) { + if (INTEL_INFO(dev)->gen >= 9) { + *stolen = gen9_get_stolen_size(snb_gmch_ctl); + gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); + } else if (IS_CHERRYVIEW(dev)) { *stolen = chv_get_stolen_size(snb_gmch_ctl); gtt_size = chv_get_total_gtt_size(snb_gmch_ctl); } else { @@ -2127,6 +2148,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, vma->obj = obj; switch (INTEL_INFO(vm->dev)->gen) { + case 9: case 8: case 7: case 6: diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 2cefb597df6d..d1e7a3e088aa 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; } else if (INTEL_INFO(dev)->gen >= 6) { - uint32_t dimm_c0, dimm_c1; - dimm_c0 = I915_READ(MAD_DIMM_C0); - dimm_c1 = I915_READ(MAD_DIMM_C1); - dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; - dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; - /* Enable swizzling when the channels are populated with - * identically sized dimms. We don't need to check the 3rd - * channel because no cpu with gpu attached ships in that - * configuration. Also, swizzling only makes sense for 2 - * channels anyway. 
*/ - if (dimm_c0 == dimm_c1) { - swizzle_x = I915_BIT_6_SWIZZLE_9_10; - swizzle_y = I915_BIT_6_SWIZZLE_9; + if (dev_priv->preserve_bios_swizzle) { + if (I915_READ(DISP_ARB_CTL) & + DISP_TILE_SURFACE_SWIZZLING) { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } else { + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } } else { - swizzle_x = I915_BIT_6_SWIZZLE_NONE; - swizzle_y = I915_BIT_6_SWIZZLE_NONE; + uint32_t dimm_c0, dimm_c1; + dimm_c0 = I915_READ(MAD_DIMM_C0); + dimm_c1 = I915_READ(MAD_DIMM_C1); + dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; + dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; + /* Enable swizzling when the channels are populated + * with identically sized dimms. We don't need to check + * the 3rd channel because no cpu with gpu attached + * ships in that configuration. Also, swizzling only + * makes sense for 2 channels anyway. */ + if (dimm_c0 == dimm_c1) { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } else { + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } } } else if (IS_GEN5(dev)) { /* On Ironlake whatever DRAM config, GPU always do diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2c87a797213f..e664599de6e7 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -765,6 +765,7 @@ static void i915_gem_record_fences(struct drm_device *dev, /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 9: case 8: case 7: case 6: @@ -923,6 +924,7 @@ static void i915_record_ring_state(struct drm_device *dev, ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); switch (INTEL_INFO(dev)->gen) { + case 9: case 8: for (i = 0; i < 4; i++) { ering->vm_info.pdp[i] = @@ -1326,13 +1328,12 @@ void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + spin_lock_irq(&dev_priv->gpu_error.lock); error_priv->error = dev_priv->gpu_error.first_error; if (error_priv->error) kref_get(&error_priv->error->ref); - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + spin_unlock_irq(&dev_priv->gpu_error.lock); } @@ -1346,12 +1347,11 @@ void i915_destroy_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; - unsigned long flags; - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + spin_lock_irq(&dev_priv->gpu_error.lock); error = dev_priv->gpu_error.first_error; dev_priv->gpu_error.first_error = NULL; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + spin_unlock_irq(&dev_priv->gpu_error.lock); if (error) kref_put(&error->ref, i915_error_state_free); @@ -1389,6 +1389,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) WARN_ONCE(1, "Unsupported platform\n"); case 7: case 8: + case 9: instdone[0] = I915_READ(GEN7_INSTDONE_1); instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 2e0613e26251..176de6322e4d 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = { [DRM_I915_ALLOC] = compat_i915_alloc }; -#ifdef CONFIG_COMPAT /** * Called whenever a 32-bit 
process running under a 64-bit kernel * performs an ioctl on /dev/dri/card<n>. @@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return ret; } -#endif diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f66392b6e287..a2b013d97fb6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -37,6 +37,14 @@ #include "i915_trace.h" #include "intel_drv.h" +/** + * DOC: interrupt handling + * + * These functions provide the basic support for enabling and disabling + * interrupt handling. There's a lot more functionality in i915_irq.c + * and related files, but that will be described in separate chapters. + */ + static const u32 hpd_ibx[] = { [HPD_CRT] = SDE_CRT_HOTPLUG, [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, @@ -131,7 +139,7 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ } while (0) /* For display hotplug interrupt */ -static void +void ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); @@ -146,7 +154,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) } } -static void +void ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); @@ -230,24 +238,6 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) snb_update_pm_irq(dev_priv, mask, 0); } -static bool ivb_can_enable_err_int(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *crtc; - enum pipe pipe; - - assert_spin_locked(&dev_priv->irq_lock); - - for_each_pipe(dev_priv, pipe) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (crtc->cpu_fifo_underrun_disabled) - return false; - } - - return true; -} - /** * bdw_update_pm_irq - update GT interrupt 2 * @dev_priv: driver private @@ -288,131 +278,15 @@ void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) bdw_update_pm_irq(dev_priv, mask, 0); } -static bool cpt_can_enable_serr_int(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - struct intel_crtc *crtc; - - assert_spin_locked(&dev_priv->irq_lock); - - for_each_pipe(dev_priv, pipe) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (crtc->pch_fifo_underrun_disabled) - return false; - } - - return true; -} - -void i9xx_check_fifo_underruns(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *crtc; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - - for_each_intel_crtc(dev, crtc) { - u32 reg = PIPESTAT(crtc->pipe); - u32 pipestat; - - if (crtc->cpu_fifo_underrun_disabled) - continue; - - pipestat = I915_READ(reg) & 0xffff0000; - if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) - continue; - - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); - POSTING_READ(reg); - - DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); - } - - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); -} - -static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, - bool enable, bool old) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & 0xffff0000; - - assert_spin_locked(&dev_priv->irq_lock); - - if (enable) { - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); - POSTING_READ(reg); - } else { - if (old && 
pipestat & PIPE_FIFO_UNDERRUN_STATUS) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); - } -} - -static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : - DE_PIPEB_FIFO_UNDERRUN; - - if (enable) - ironlake_enable_display_irq(dev_priv, bit); - else - ironlake_disable_display_irq(dev_priv, bit); -} - -static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, - bool enable, bool old) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - if (enable) { - I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); - - if (!ivb_can_enable_err_int(dev)) - return; - - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); - } else { - ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); - - if (old && - I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { - DRM_ERROR("uncleared fifo underrun on pipe %c\n", - pipe_name(pipe)); - } - } -} - -static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - assert_spin_locked(&dev_priv->irq_lock); - - if (enable) - dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; - else - dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); -} - /** * ibx_display_interrupt_update - update SDEIMR * @dev_priv: driver private * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) +void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) { uint32_t sdeimr = I915_READ(SDEIMR); sdeimr &= ~interrupt_mask; @@ -426,160 +300,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, I915_WRITE(SDEIMR, sdeimr); POSTING_READ(SDEIMR); } -#define ibx_enable_display_interrupt(dev_priv, bits) \ - ibx_display_interrupt_update((dev_priv), (bits), (bits)) -#define ibx_disable_display_interrupt(dev_priv, bits) \ - ibx_display_interrupt_update((dev_priv), (bits), 0) - -static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, - enum transcoder pch_transcoder, - bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
- SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; - - if (enable) - ibx_enable_display_interrupt(dev_priv, bit); - else - ibx_disable_display_interrupt(dev_priv, bit); -} - -static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, - enum transcoder pch_transcoder, - bool enable, bool old) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (enable) { - I915_WRITE(SERR_INT, - SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); - - if (!cpt_can_enable_serr_int(dev)) - return; - - ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); - } else { - ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); - - if (old && I915_READ(SERR_INT) & - SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { - DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", - transcoder_name(pch_transcoder)); - } - } -} - -/** - * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages - * @dev: drm device - * @pipe: pipe - * @enable: true if we want to report FIFO underrun errors, false otherwise - * - * This function makes us disable or enable CPU fifo underruns for a specific - * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun - * reporting for one pipe may also disable all the other CPU error interruts for - * the other pipes, due to the fact that there's just one interrupt mask/enable - * bit for all the pipes. - * - * Returns the previous state of underrun reporting. - */ -static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - bool old; - - assert_spin_locked(&dev_priv->irq_lock); - - old = !intel_crtc->cpu_fifo_underrun_disabled; - intel_crtc->cpu_fifo_underrun_disabled = !enable; - - if (HAS_GMCH_DISPLAY(dev)) - i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); - else if (IS_GEN5(dev) || IS_GEN6(dev)) - ironlake_set_fifo_underrun_reporting(dev, pipe, enable); - else if (IS_GEN7(dev)) - ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); - else if (IS_GEN8(dev)) - broadwell_set_fifo_underrun_reporting(dev, pipe, enable); - - return old; -} - -bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - bool ret; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return ret; -} - -static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, - enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - return !intel_crtc->cpu_fifo_underrun_disabled; -} - -/** - * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages - * @dev: drm device - * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) - * @enable: true if we want to report FIFO underrun errors, false otherwise - * - * This function makes us disable or enable PCH fifo underruns for a specific - * PCH transcoder. Notice that on some PCHs (e.g. 
CPT/PPT), disabling FIFO - * underrun reporting for one transcoder may also disable all the other PCH - * error interruts for the other transcoders, due to the fact that there's just - * one interrupt mask/enable bit for all the transcoders. - * - * Returns the previous state of underrun reporting. - */ -bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, - enum transcoder pch_transcoder, - bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long flags; - bool old; - - /* - * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT - * has only one pch transcoder A that all pipes can use. To avoid racy - * pch transcoder -> pipe lookups from interrupt code simply store the - * underrun statistics in crtc A. Since we never expose this anywhere - * nor use it outside of the fifo underrun code here using the "wrong" - * crtc on LPT won't cause issues. - */ - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - - old = !intel_crtc->pch_fifo_underrun_disabled; - intel_crtc->pch_fifo_underrun_disabled = !enable; - - if (HAS_PCH_IBX(dev)) - ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); - else - cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old); - - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - return old; -} - static void __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, @@ -589,6 +309,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || status_mask & ~PIPESTAT_INT_STATUS_MASK, @@ -615,6 +336,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || status_mask & ~PIPESTAT_INT_STATUS_MASK, @@ -694,19 +416,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, static void i915_enable_asle_pipestat(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) return; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } /** @@ -1094,18 +815,17 @@ static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, dig_port_work); - unsigned long irqflags; u32 long_port_mask, short_port_mask; struct intel_digital_port *intel_dig_port; int i, ret; u32 old_bits = 0; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); long_port_mask = dev_priv->long_hpd_port_mask; dev_priv->long_hpd_port_mask = 0; short_port_mask = dev_priv->short_hpd_port_mask; dev_priv->short_hpd_port_mask = 0; - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); for (i = 0; i < 
I915_MAX_PORTS; i++) { bool valid = false; @@ -1130,9 +850,9 @@ static void i915_digport_work_func(struct work_struct *work) } if (old_bits) { - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); dev_priv->hpd_event_bits |= old_bits; - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); schedule_work(&dev_priv->hotplug_work); } } @@ -1151,7 +871,6 @@ static void i915_hotplug_work_func(struct work_struct *work) struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; struct drm_connector *connector; - unsigned long irqflags; bool hpd_disabled = false; bool changed = false; u32 hpd_event_bits; @@ -1159,7 +878,7 @@ static void i915_hotplug_work_func(struct work_struct *work) mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); hpd_event_bits = dev_priv->hpd_event_bits; dev_priv->hpd_event_bits = 0; @@ -1193,7 +912,7 @@ static void i915_hotplug_work_func(struct work_struct *work) msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); @@ -1488,7 +1207,6 @@ static void ivybridge_parity_work(struct work_struct *work) u32 error_status, row, bank, subbank; char *parity_event[6]; uint32_t misccpctl; - unsigned long flags; uint8_t slice = 0; /* We must turn off DOP level clock gating to access the L3 registers. @@ -1547,9 +1265,9 @@ static void ivybridge_parity_work(struct work_struct *work) out: WARN_ON(dev_priv->l3_parity.which_slice); - spin_lock_irqsave(&dev_priv->irq_lock, flags); + spin_lock_irq(&dev_priv->irq_lock); gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + spin_unlock_irq(&dev_priv->irq_lock); mutex_unlock(&dev_priv->dev->struct_mutex); } @@ -2031,9 +1749,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) * we need to be careful that we only handle what we want to * handle. */ - mask = 0; - if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) - mask |= PIPE_FIFO_UNDERRUN_STATUS; + + /* fifo underruns are filtered in the underrun handler. 
*/ + mask = PIPE_FIFO_UNDERRUN_STATUS; switch (pipe) { case PIPE_A: @@ -2078,9 +1796,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) @@ -2247,14 +1964,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, - false)) - DRM_ERROR("PCH transcoder A FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); if (pch_iir & SDE_TRANSB_FIFO_UNDER) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, - false)) - DRM_ERROR("PCH transcoder B FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); } static void ivb_err_int_handler(struct drm_device *dev) @@ -2267,12 +1980,8 @@ static void ivb_err_int_handler(struct drm_device *dev) DRM_ERROR("Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { - if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, - false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); - } + if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { if (IS_IVYBRIDGE(dev)) @@ -2294,19 +2003,13 @@ static void cpt_serr_int_handler(struct drm_device *dev) DRM_ERROR("PCH poison interrupt\n"); if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, - false)) - DRM_ERROR("PCH transcoder A FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, - false)) - DRM_ERROR("PCH transcoder B FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, - false)) - DRM_ERROR("PCH transcoder C FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); I915_WRITE(SERR_INT, serr_int); } @@ -2372,9 +2075,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) intel_check_page_flip(dev, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) i9xx_pipe_crc_irq_handler(dev, pipe); @@ -2566,7 +2267,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } for_each_pipe(dev_priv, pipe) { - uint32_t pipe_iir; + uint32_t pipe_iir, flip_done = 0, fault_errors = 0; if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) continue; @@ -2575,11 +2276,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (pipe_iir) { ret = IRQ_HANDLED; I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + if (pipe_iir & GEN8_PIPE_VBLANK && intel_pipe_handle_vblank(dev, pipe)) intel_check_page_flip(dev, pipe); - if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { + if 
(IS_GEN9(dev)) + flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; + else + flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; + + if (flip_done) { intel_prepare_page_flip(dev, pipe); intel_finish_page_flip_plane(dev, pipe); } @@ -2587,18 +2294,20 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev, pipe); - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, - false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); - } + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); - if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { + + if (IS_GEN9(dev)) + fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + + if (fault_errors) DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", pipe_name(pipe), pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); - } } else DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); } @@ -3444,8 +3153,8 @@ static void gen8_irq_reset(struct drm_device *dev) gen8_gt_irq_reset(dev_priv); for_each_pipe(dev_priv, pipe) - if (intel_display_power_enabled(dev_priv, - POWER_DOMAIN_PIPE(pipe))) + if (intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); GEN5_IRQ_RESET(GEN8_DE_PORT_); @@ -3457,15 +3166,14 @@ static void gen8_irq_reset(struct drm_device *dev) void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) { - unsigned long irqflags; uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } static void cherryview_irq_preinstall(struct drm_device *dev) @@ -3584,7 +3292,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) static int ironlake_irq_postinstall(struct drm_device *dev) { - unsigned long irqflags; struct drm_i915_private *dev_priv = dev->dev_private; u32 display_mask, extra_mask; @@ -3623,9 +3330,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev) * spinlocking not required here for correctness since interrupt * setup is guaranteed to run in single-threaded context. But we * need it to make the assert_spin_locked happy. 
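 * As an aside, spin_lock_irq() is the correct variant for this and the
 * other setup paths converted in this patch: they run in process context
 * with interrupts enabled, so unconditionally re-enabling interrupts on
 * unlock is fine, and the flags word of spin_lock_irqsave() is pure
 * overhead:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);	- process context, irqs on
 *	spin_lock_irqsave(&dev_priv->irq_lock, flags);	- callable anywhere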
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } return 0; @@ -3701,7 +3408,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) dev_priv->display_irqs_enabled = true; - if (dev_priv->dev->irq_enabled) + if (intel_irqs_enabled(dev_priv)) valleyview_display_irqs_install(dev_priv); } @@ -3714,14 +3421,13 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) dev_priv->display_irqs_enabled = false; - if (dev_priv->dev->irq_enabled) + if (intel_irqs_enabled(dev_priv)) valleyview_display_irqs_uninstall(dev_priv); } static int valleyview_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; dev_priv->irq_mask = ~0; @@ -3735,10 +3441,10 @@ static int valleyview_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) valleyview_display_irqs_install(dev_priv); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); @@ -3783,18 +3489,26 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { - uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | - GEN8_PIPE_CDCLK_CRC_DONE | - GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | - GEN8_PIPE_FIFO_UNDERRUN; + uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + uint32_t de_pipe_enables; int pipe; + + if (IS_GEN9(dev_priv)) + de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | + GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | + GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + + de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | + GEN8_PIPE_FIFO_UNDERRUN; + dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; for_each_pipe(dev_priv, pipe) - if (intel_display_power_enabled(dev_priv, + if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], @@ -3829,7 +3543,6 @@ static int cherryview_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | PIPE_CRC_DONE_INTERRUPT_STATUS; - unsigned long irqflags; int pipe; /* @@ -3841,11 +3554,11 @@ static int cherryview_irq_postinstall(struct drm_device *dev) for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); for_each_pipe(dev_priv, pipe) i915_enable_pipestat(dev_priv, pipe, pipestat_enable); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IMR, dev_priv->irq_mask); @@ -3872,7 +3585,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) static void valleyview_irq_uninstall(struct drm_device *dev) { struct 
drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; int pipe; if (!dev_priv) @@ -3887,10 +3599,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) valleyview_display_irqs_uninstall(dev_priv); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); dev_priv->irq_mask = 0; @@ -3976,7 +3690,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev) static int i8xx_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -3999,10 +3712,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); return 0; } @@ -4047,7 +3760,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) struct drm_i915_private *dev_priv = dev->dev_private; u16 iir, new_iir; u32 pipe_stats[2]; - unsigned long irqflags; int pipe; u16 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -4063,7 +3775,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false, "Command parser error, iir 0x%08x", @@ -4079,7 +3791,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & 0x8000ffff) I915_WRITE(reg, pipe_stats[pipe]); } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); I915_WRITE16(IIR, iir & ~flip_mask); new_iir = I915_READ16(IIR); /* Flush posted writes */ @@ -4101,9 +3813,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); } iir = new_iir; @@ -4149,7 +3861,6 @@ static int i915_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; - unsigned long irqflags; I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -4187,10 +3898,10 @@ static int i915_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); return 0; } @@ -4234,7 +3945,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) struct drm_device *dev = arg; struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; - unsigned long irqflags; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; @@ -4250,7 +3960,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false, "Command parser error, iir 0x%08x", @@ -4266,7 +3976,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) irq_received = true; } } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); if (!irq_received) break; @@ -4297,9 +4007,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) @@ -4372,7 +4082,6 @@ static int i965_irq_postinstall(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; u32 error_mask; - unsigned long irqflags; /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | @@ -4393,11 +4102,11 @@ static int i965_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); /* * Enable some error detection, note the instruction error mask @@ -4462,7 +4171,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir; u32 pipe_stats[I915_MAX_PIPES]; - unsigned long irqflags; int ret = IRQ_NONE, pipe; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -4479,7 +4187,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). 
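 * Note that plain spin_lock() suffices for the section below: we are in
 * the hardirq handler itself, so local interrupts are already disabled
 * and there is no interrupt state worth saving with the _irqsave variant:
 *
 *	spin_lock(&dev_priv->irq_lock);	- irq context, irqs already off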
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false, "Command parser error, iir 0x%08x", @@ -4497,7 +4205,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) irq_received = true; } } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); if (!irq_received) break; @@ -4527,9 +4235,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) @@ -4584,19 +4291,18 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(IIR, I915_READ(IIR)); } -static void intel_hpd_irq_reenable(struct work_struct *work) +static void intel_hpd_irq_reenable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), hotplug_reenable_work.work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; - unsigned long irqflags; int i; intel_runtime_pm_get(dev_priv); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { struct drm_connector *connector; @@ -4620,14 +4326,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work) } if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); intel_runtime_pm_put(dev_priv); } -void intel_irq_init(struct drm_device *dev) +/** + * intel_irq_init - initializes irq support + * @dev_priv: i915 device instance + * + * This function initializes all the irq support including work items, timers + * and all the vtables. It does not setup the interrupt itself though. 
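+ *
+ * A rough sketch of the intended bring-up ordering in the driver load
+ * code (illustrative only, error handling omitted):
+ *
+ *	intel_irq_init(dev_priv);	- work items, timers, vtables
+ *	intel_irq_install(dev_priv);	- request the hardware interrupt
+ *	intel_hpd_init(dev_priv);	- finally enable hotplug support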
+ */ +void intel_irq_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); @@ -4636,7 +4349,7 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); /* Let's track the enabled rps events */ - if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) + if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) /* WaGsvRC0ResidencyMethod:vlv */ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; else @@ -4646,17 +4359,14 @@ void intel_irq_init(struct drm_device *dev) i915_hangcheck_elapsed, (unsigned long) dev); INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, - intel_hpd_irq_reenable); + intel_hpd_irq_reenable_work); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - /* Haven't installed the IRQ handler yet */ - dev_priv->pm._irqs_disabled = true; - - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { dev->max_vblank_count = 0; dev->driver->get_vblank_counter = i8xx_get_vblank_counter; - } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } else { @@ -4669,7 +4379,7 @@ void intel_irq_init(struct drm_device *dev) * Gen2 doesn't have a hardware frame counter and so depends on * vblank interrupts to produce sane vblank sequence numbers. */ - if (!IS_GEN2(dev)) + if (!IS_GEN2(dev_priv)) dev->vblank_disable_immediate = true; if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -4677,7 +4387,7 @@ void intel_irq_init(struct drm_device *dev) dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; } - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { dev->driver->irq_handler = cherryview_irq_handler; dev->driver->irq_preinstall = cherryview_irq_preinstall; dev->driver->irq_postinstall = cherryview_irq_postinstall; @@ -4685,7 +4395,7 @@ void intel_irq_init(struct drm_device *dev) dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv)) { dev->driver->irq_handler = valleyview_irq_handler; dev->driver->irq_preinstall = valleyview_irq_preinstall; dev->driver->irq_postinstall = valleyview_irq_postinstall; @@ -4693,7 +4403,7 @@ void intel_irq_init(struct drm_device *dev) dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_GEN8(dev)) { + } else if (INTEL_INFO(dev_priv)->gen >= 8) { dev->driver->irq_handler = gen8_irq_handler; dev->driver->irq_preinstall = gen8_irq_reset; dev->driver->irq_postinstall = gen8_irq_postinstall; @@ -4710,12 +4420,12 @@ void intel_irq_init(struct drm_device *dev) dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; } else { - if (INTEL_INFO(dev)->gen == 2) { + if (INTEL_INFO(dev_priv)->gen == 2) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; - } else if (INTEL_INFO(dev)->gen == 3) {
+ } else if (INTEL_INFO(dev_priv)->gen == 3) { dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; @@ -4733,12 +4443,23 @@ void intel_irq_init(struct drm_device *dev) } } -void intel_hpd_init(struct drm_device *dev) +/** + * intel_hpd_init - initializes and enables hpd support + * @dev_priv: i915 device instance + * + * This function enables the hotplug support. It requires that interrupts have + * already been enabled with intel_irq_install(). From this point on hotplug and + * poll requests can run concurrently with other code, so locking rules must be + * obeyed. + * + * This is a separate step from interrupt enabling to simplify the locking rules + * in the driver load and resume code. + */ +void intel_hpd_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; - unsigned long irqflags; int i; for (i = 1; i < HPD_NUM_PINS; i++) { @@ -4756,27 +4477,72 @@ void intel_hpd_init(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked checks happy. */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } -/* Disable interrupts so we can allow runtime PM. */ -void intel_runtime_pm_disable_interrupts(struct drm_device *dev) +/** + * intel_irq_install - enables the hardware interrupt + * @dev_priv: i915 device instance + * + * This function enables the hardware interrupt handling, but leaves hotplug + * handling disabled. It is called after intel_irq_init(). + * + * In the driver load and resume code we need working interrupts in a few places + * but don't want to deal with the hassle of concurrent probe and hotplug + * workers. Hence the split into this two-stage approach. + */ +int intel_irq_install(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + /* + * We enable some interrupt sources in our postinstall hooks, so mark + * interrupts as enabled _before_ actually enabling them to avoid + * special cases in our ordering checks. + */ + dev_priv->pm.irqs_enabled = true; - dev->driver->irq_uninstall(dev); - dev_priv->pm._irqs_disabled = true; + return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); } -/* Restore interrupts so we can recover from runtime PM. */ -void intel_runtime_pm_restore_interrupts(struct drm_device *dev) +/** + * intel_irq_uninstall - finalizes all irq handling + * @dev_priv: i915 device instance + * + * This stops interrupt and hotplug handling and unregisters and frees all + * resources acquired in the init functions.
+ */ +void intel_irq_uninstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + drm_irq_uninstall(dev_priv->dev); + intel_hpd_cancel_work(dev_priv); + dev_priv->pm.irqs_enabled = false; +} - dev_priv->pm._irqs_disabled = false; - dev->driver->irq_preinstall(dev); - dev->driver->irq_postinstall(dev); +/** + * intel_runtime_pm_disable_interrupts - runtime interrupt disabling + * @dev_priv: i915 device instance + * + * This function is used to disable interrupts at runtime, both in the runtime + * pm and the system suspend/resume code. + */ +void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) +{ + dev_priv->dev->driver->irq_uninstall(dev_priv->dev); + dev_priv->pm.irqs_enabled = false; +} + +/** + * intel_runtime_pm_enable_interrupts - runtime interrupt enabling + * @dev_priv: i915 device instance + * + * This function is used to enable interrupts at runtime, both in the runtime + * pm and the system suspend/resume code. + */ +void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) +{ + dev_priv->pm.irqs_enabled = true; + dev_priv->dev->driver->irq_preinstall(dev_priv->dev); + dev_priv->dev->driver->irq_postinstall(dev_priv->dev); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c01e5f31430e..77fce96b54a5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -26,8 +26,8 @@ #define _I915_REG_H_ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _PLANE(plane, a, b) _PIPE(plane, a, b) #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) - #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ (pipe) == PIPE_B ? (b) : (c)) @@ -672,7 +672,7 @@ enum punit_power_well { * need to be accessed during AUX communication, * * Generally the common lane corresponds to the pipe and - * the spline (PCS/TX) correponds to the port. + * the spline (PCS/TX) corresponds to the port. 
* * For dual channel PHY (VLV/CHV): * @@ -796,6 +796,8 @@ enum punit_power_well { #define _VLV_PCS_DW0_CH1 0x8400 #define DPIO_PCS_TX_LANE2_RESET (1<<16) #define DPIO_PCS_TX_LANE1_RESET (1<<7) +#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4) +#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3) #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) #define _VLV_PCS01_DW0_CH0 0x200 @@ -836,12 +838,31 @@ enum punit_power_well { #define _VLV_PCS_DW9_CH0 0x8224 #define _VLV_PCS_DW9_CH1 0x8424 +#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13) +#define DPIO_PCS_TX2MARGIN_000 (0<<13) +#define DPIO_PCS_TX2MARGIN_101 (1<<13) +#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10) +#define DPIO_PCS_TX1MARGIN_000 (0<<10) +#define DPIO_PCS_TX1MARGIN_101 (1<<10) #define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1) +#define _VLV_PCS01_DW9_CH0 0x224 +#define _VLV_PCS23_DW9_CH0 0x424 +#define _VLV_PCS01_DW9_CH1 0x2624 +#define _VLV_PCS23_DW9_CH1 0x2824 +#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1) +#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1) + #define _CHV_PCS_DW10_CH0 0x8228 #define _CHV_PCS_DW10_CH1 0x8428 #define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30) #define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31) +#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24) +#define DPIO_PCS_TX2DEEMP_9P5 (0<<24) +#define DPIO_PCS_TX2DEEMP_6P0 (2<<24) +#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16) +#define DPIO_PCS_TX1DEEMP_9P5 (0<<16) +#define DPIO_PCS_TX1DEEMP_6P0 (2<<16) #define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1) #define _VLV_PCS01_DW10_CH0 0x0228 @@ -853,8 +874,18 @@ enum punit_power_well { #define _VLV_PCS_DW11_CH0 0x822c #define _VLV_PCS_DW11_CH1 0x842c +#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3) +#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1) +#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0) #define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1) +#define _VLV_PCS01_DW11_CH0 0x022c +#define _VLV_PCS23_DW11_CH0 0x042c +#define _VLV_PCS01_DW11_CH1 0x262c +#define _VLV_PCS23_DW11_CH1 0x282c +#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1) +#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1) + #define _VLV_PCS_DW12_CH0 0x8230 #define _VLV_PCS_DW12_CH1 0x8430 #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1) @@ -2506,9 +2537,7 @@ enum punit_power_well { #define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) #define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) -#define EDP_PSR_DPCD_COMMAND 0x80060000 #define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18) -#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) #define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c) #define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20) #define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24) @@ -3645,6 +3674,7 @@ enum punit_power_well { #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 +#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) /* * Computing GMCH M and N values for the Display Port link @@ -4024,17 +4054,18 @@ enum punit_power_well { #define DSPFW_PLANEA_WM1_HI_MASK (1<<0) /* drain latency register values*/ +#define DRAIN_LATENCY_PRECISION_16 16 #define DRAIN_LATENCY_PRECISION_32 32 #define DRAIN_LATENCY_PRECISION_64 64 #define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) -#define DDL_CURSOR_PRECISION_64 (1<<31) -#define DDL_CURSOR_PRECISION_32 (0<<31) +#define 
DDL_CURSOR_PRECISION_HIGH (1<<31) +#define DDL_CURSOR_PRECISION_LOW (0<<31) #define DDL_CURSOR_SHIFT 24 -#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite))) -#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite))) +#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite))) +#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite))) #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) -#define DDL_PLANE_PRECISION_64 (1<<7) -#define DDL_PLANE_PRECISION_32 (0<<7) +#define DDL_PLANE_PRECISION_HIGH (1<<7) +#define DDL_PLANE_PRECISION_LOW (0<<7) #define DDL_PLANE_SHIFT 0 #define DRAIN_LATENCY_MASK 0x7f @@ -4177,6 +4208,7 @@ enum punit_power_well { #define MCURSOR_PIPE_A 0x00 #define MCURSOR_PIPE_B (1 << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) +#define CURSOR_ROTATE_180 (1<<15) #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) #define _CURABASE 0x70084 #define _CURAPOS 0x70088 @@ -4510,6 +4542,146 @@ enum punit_power_well { #define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) #define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) +/* Skylake plane registers */ + +#define _PLANE_CTL_1_A 0x70180 +#define _PLANE_CTL_2_A 0x70280 +#define _PLANE_CTL_3_A 0x70380 +#define PLANE_CTL_ENABLE (1 << 31) +#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) +#define PLANE_CTL_FORMAT_MASK (0xf << 24) +#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24) +#define PLANE_CTL_FORMAT_NV12 ( 1 << 24) +#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24) +#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24) +#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24) +#define PLANE_CTL_FORMAT_AYUV ( 8 << 24) +#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24) +#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24) +#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) +#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) +#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21) +#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21) +#define PLANE_CTL_ORDER_BGRX (0 << 20) +#define PLANE_CTL_ORDER_RGBX (1 << 20) +#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) +#define PLANE_CTL_YUV422_YUYV ( 0 << 16) +#define PLANE_CTL_YUV422_UYVY ( 1 << 16) +#define PLANE_CTL_YUV422_YVYU ( 2 << 16) +#define PLANE_CTL_YUV422_VYUY ( 3 << 16) +#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15) +#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) +#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) +#define PLANE_CTL_TILED_MASK (0x7 << 10) +#define PLANE_CTL_TILED_LINEAR ( 0 << 10) +#define PLANE_CTL_TILED_X ( 1 << 10) +#define PLANE_CTL_TILED_Y ( 4 << 10) +#define PLANE_CTL_TILED_YF ( 5 << 10) +#define PLANE_CTL_ALPHA_MASK (0x3 << 4) +#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4) +#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4) +#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4) +#define PLANE_CTL_ROTATE_MASK 0x3 +#define PLANE_CTL_ROTATE_0 0x0 +#define PLANE_CTL_ROTATE_180 0x2 +#define _PLANE_STRIDE_1_A 0x70188 +#define _PLANE_STRIDE_2_A 0x70288 +#define _PLANE_STRIDE_3_A 0x70388 +#define _PLANE_POS_1_A 0x7018c +#define _PLANE_POS_2_A 0x7028c +#define _PLANE_POS_3_A 0x7038c +#define _PLANE_SIZE_1_A 0x70190 +#define _PLANE_SIZE_2_A 0x70290 +#define _PLANE_SIZE_3_A 0x70390 +#define _PLANE_SURF_1_A 0x7019c +#define _PLANE_SURF_2_A 0x7029c +#define _PLANE_SURF_3_A 0x7039c +#define _PLANE_OFFSET_1_A 0x701a4 +#define _PLANE_OFFSET_2_A 0x702a4 +#define _PLANE_OFFSET_3_A 0x703a4 +#define _PLANE_KEYVAL_1_A 0x70194 +#define _PLANE_KEYVAL_2_A 0x70294 +#define _PLANE_KEYMSK_1_A 0x70198 +#define _PLANE_KEYMSK_2_A 0x70298 +#define _PLANE_KEYMAX_1_A 0x701a0 +#define 
_PLANE_KEYMAX_2_A 0x702a0 + +#define _PLANE_CTL_1_B 0x71180 +#define _PLANE_CTL_2_B 0x71280 +#define _PLANE_CTL_3_B 0x71380 +#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B) +#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B) +#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B) +#define PLANE_CTL(pipe, plane) \ + _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe)) + +#define _PLANE_STRIDE_1_B 0x71188 +#define _PLANE_STRIDE_2_B 0x71288 +#define _PLANE_STRIDE_3_B 0x71388 +#define _PLANE_STRIDE_1(pipe) \ + _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B) +#define _PLANE_STRIDE_2(pipe) \ + _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B) +#define _PLANE_STRIDE_3(pipe) \ + _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B) +#define PLANE_STRIDE(pipe, plane) \ + _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) + +#define _PLANE_POS_1_B 0x7118c +#define _PLANE_POS_2_B 0x7128c +#define _PLANE_POS_3_B 0x7138c +#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B) +#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B) +#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B) +#define PLANE_POS(pipe, plane) \ + _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe)) + +#define _PLANE_SIZE_1_B 0x71190 +#define _PLANE_SIZE_2_B 0x71290 +#define _PLANE_SIZE_3_B 0x71390 +#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B) +#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B) +#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B) +#define PLANE_SIZE(pipe, plane) \ + _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe)) + +#define _PLANE_SURF_1_B 0x7119c +#define _PLANE_SURF_2_B 0x7129c +#define _PLANE_SURF_3_B 0x7139c +#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B) +#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B) +#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B) +#define PLANE_SURF(pipe, plane) \ + _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe)) + +#define _PLANE_OFFSET_1_B 0x711a4 +#define _PLANE_OFFSET_2_B 0x712a4 +#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B) +#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B) +#define PLANE_OFFSET(pipe, plane) \ + _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe)) + +#define _PLANE_KEYVAL_1_B 0x71194 +#define _PLANE_KEYVAL_2_B 0x71294 +#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B) +#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B) +#define PLANE_KEYVAL(pipe, plane) \ + _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe)) + +#define _PLANE_KEYMSK_1_B 0x71198 +#define _PLANE_KEYMSK_2_B 0x71298 +#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B) +#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B) +#define PLANE_KEYMSK(pipe, plane) \ + _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe)) + +#define _PLANE_KEYMAX_1_B 0x711a0 +#define _PLANE_KEYMAX_2_B 0x712a0 +#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B) +#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B) +#define PLANE_KEYMAX(pipe, plane) \ + _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe)) + /* VBIOS regs */ #define VGACNTRL 0x71400 # define VGA_DISP_DISABLE (1 << 31) @@ 
-4746,10 +4918,23 @@ enum punit_power_well { #define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) #define GEN8_PIPE_VSYNC (1 << 1) #define GEN8_PIPE_VBLANK (1 << 0) +#define GEN9_PIPE_CURSOR_FAULT (1 << 11) +#define GEN9_PIPE_PLANE3_FAULT (1 << 9) +#define GEN9_PIPE_PLANE2_FAULT (1 << 8) +#define GEN9_PIPE_PLANE1_FAULT (1 << 7) +#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5) +#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4) +#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3) +#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p)) #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ (GEN8_PIPE_CURSOR_FAULT | \ GEN8_PIPE_SPRITE_FAULT | \ GEN8_PIPE_PRIMARY_FAULT) +#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \ + (GEN9_PIPE_CURSOR_FAULT | \ + GEN9_PIPE_PLANE3_FAULT | \ + GEN9_PIPE_PLANE2_FAULT | \ + GEN9_PIPE_PLANE1_FAULT) #define GEN8_DE_PORT_ISR 0x44440 #define GEN8_DE_PORT_IMR 0x44444 @@ -4839,6 +5024,7 @@ enum punit_power_well { /* GEN8 chicken */ #define HDC_CHICKEN0 0x7300 #define HDC_FORCE_NON_COHERENT (1<<4) +#define HDC_FENCE_DEST_SLM_DISABLE (1<<14) /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 @@ -5751,6 +5937,9 @@ enum punit_power_well { #define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) +#define GEN9_HALF_SLICE_CHICKEN5 0xe188 +#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5) + #define GEN8_ROW_CHICKEN 0xe4f0 #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) #define STALL_DOP_GATING_DISABLE (1<<5) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 503847f18fdd..4a5af695307e 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL); static struct attribute *rc6_attrs[] = { &dev_attr_rc6_enable.attr, &dev_attr_rc6_residency_ms.attr, - &dev_attr_rc6p_residency_ms.attr, - &dev_attr_rc6pp_residency_ms.attr, NULL }; @@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = { .name = power_group_name, .attrs = rc6_attrs }; + +static struct attribute *rc6p_attrs[] = { + &dev_attr_rc6p_residency_ms.attr, + &dev_attr_rc6pp_residency_ms.attr, + NULL +}; + +static struct attribute_group rc6p_attr_group = { + .name = power_group_name, + .attrs = rc6p_attrs +}; #endif static int l3_access_valid(struct drm_device *dev, loff_t offset) @@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev) int ret; #ifdef CONFIG_PM - if (INTEL_INFO(dev)->gen >= 6) { + if (HAS_RC6(dev)) { ret = sysfs_merge_group(&dev->primary->kdev->kobj, &rc6_attr_group); if (ret) DRM_ERROR("RC6 residency sysfs setup failed\n"); } + if (HAS_RC6p(dev)) { + ret = sysfs_merge_group(&dev->primary->kdev->kobj, + &rc6p_attr_group); + if (ret) + DRM_ERROR("RC6p residency sysfs setup failed\n"); + } #endif if (HAS_L3_DPF(dev)) { ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs); @@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev) device_remove_bin_file(dev->primary->kdev, &dpf_attrs); #ifdef CONFIG_PM sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group); + sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group); #endif } diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 905999bee2ac..7603765c91fc 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -46,7 +46,7 @@ struct bdb_header { u16 version; /**< decimal */ u16 header_size; /**< in bytes */ u16 bdb_size; /**< in bytes */ -}; +} __packed; /* strictly 
speaking, this is a "skip" block, but it has interesting info */ struct vbios_data { @@ -252,7 +252,7 @@ union child_device_config { /* This one should also be safe to use anywhere, even without version * checks. */ struct common_child_dev_config common; -}; +} __packed; struct bdb_general_definitions { /* DDC GPIO */ @@ -888,12 +888,12 @@ struct mipi_pps_data { u16 bl_disable_delay; u16 panel_off_delay; u16 panel_power_cycle_delay; -}; +} __packed; struct bdb_mipi_config { struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; -}; +} __packed; /* Block 53 contains MIPI sequences as needed by the panel * for enabling it. This block can be variable in size and @@ -902,7 +902,7 @@ struct bdb_mipi_config { struct bdb_mipi_sequence { u8 version; u8 data[0]; -}; +} __packed; /* MIPI Sequence Block definitions */ enum mipi_seq { diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9212e6504e0f..a9af9a4866db 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, u32 tmp; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_enabled(dev_priv, power_domain)) + if (!intel_display_power_is_enabled(dev_priv, power_domain)) return false; tmp = I915_READ(crt->adpa_reg); @@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); POSTING_READ(crt->adpa_reg); - DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa); crt->force_hotplug_required = 1; } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b63d4fa204a3..cb5367c6f95a 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = { { 0x00BEFFFF, 0x00140006 }, { 0x80B2CFFF, 0x001B0002 }, { 0x00FFFFFF, 0x000E000A }, - { 0x00D75FFF, 0x00180004 }, - { 0x80CB2FFF, 0x001B0002 }, + { 0x00DB6FFF, 0x00160005 }, + { 0x80C71FFF, 0x001A0002 }, { 0x00F7DFFF, 0x00180004 }, { 0x80D75FFF, 0x001B0002 }, }; @@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */ }; +static const struct ddi_buf_trans skl_ddi_translations_dp[] = { + { 0x00000018, 0x000000a0 }, + { 0x00004014, 0x00000098 }, + { 0x00006012, 0x00000088 }, + { 0x00008010, 0x00000080 }, + { 0x00000018, 0x00000098 }, + { 0x00004014, 0x00000088 }, + { 0x00006012, 0x00000080 }, + { 0x00000018, 0x00000088 }, + { 0x00004014, 0x00000080 }, +}; + +static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { + /* Idx NT mV T mV db */ + { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */ + { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */ + { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */ + { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */ + { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */ + { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */ + { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */ + { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */ + { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */ + { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */ +}; + enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; @@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) const struct
ddi_buf_trans *ddi_translations_hdmi; const struct ddi_buf_trans *ddi_translations; - if (IS_BROADWELL(dev)) { + if (IS_SKYLAKE(dev)) { + ddi_translations_fdi = NULL; + ddi_translations_dp = skl_ddi_translations_dp; + ddi_translations_edp = skl_ddi_translations_dp; + ddi_translations_hdmi = skl_ddi_translations_hdmi; + n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); + hdmi_800mV_0dB = 7; + } else if (IS_BROADWELL(dev)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_edp = bdw_ddi_translations_edp; @@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) ddi_translations = ddi_translations_dp; break; case PORT_E: - ddi_translations = ddi_translations_fdi; + if (ddi_translations_fdi) + ddi_translations = ddi_translations_fdi; + else + ddi_translations = ddi_translations_dp; break; default: BUG(); @@ -962,7 +998,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) uint32_t tmp; power_domain = intel_display_port_power_domain(intel_encoder); - if (!intel_display_power_enabled(dev_priv, power_domain)) + if (!intel_display_power_is_enabled(dev_priv, power_domain)) return false; if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) @@ -1008,7 +1044,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, int i; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_enabled(dev_priv, power_domain)) + if (!intel_display_power_is_enabled(dev_priv, power_domain)) return false; tmp = I915_READ(DDI_BUF_CTL(port)); @@ -1255,7 +1291,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv) return 450000; else if (freq == LCPLL_CLK_FREQ_450) return 450000; - else if (IS_ULT(dev)) + else if (IS_HSW_ULT(dev)) return 337500; else return 540000; @@ -1296,7 +1332,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, { uint32_t val; - if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; val = I915_READ(WRPLL_CTL(pll->id)); @@ -1486,7 +1522,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; } - if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { + if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4))) pipe_config->has_audio = true; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f0a1a56406eb..31bb1d7acf9b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static void intel_increase_pllclock(struct drm_device *dev, - enum pipe pipe); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); static void i9xx_crtc_clock_get(struct intel_crtc *crtc, @@ -408,22 +406,22 @@ static void vlv_clock(int refclk, intel_clock_t *clock) /** * Returns whether any output on the specified pipe is of the specified type */ -static bool intel_pipe_has_type(struct drm_crtc *crtc, int type) +static bool intel_pipe_has_type(struct intel_crtc *crtc, int type) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct intel_encoder *encoder; - for_each_encoder_on_crtc(dev, crtc, encoder) + for_each_encoder_on_crtc(dev, &crtc->base, encoder) if (encoder->type == type) 
return true; return false; } -static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, +static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, int refclk) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { @@ -444,9 +442,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, return limit; } -static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) +static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { @@ -465,9 +463,9 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) return limit; } -static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) +static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; const intel_limit_t *limit; if (HAS_PCH_SPLIT(dev)) @@ -578,11 +576,11 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static bool -i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, +i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int err = target; @@ -639,11 +637,11 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool -pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, +pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int err = target; @@ -698,11 +696,11 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool -g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, +g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int max_n; bool found; @@ -755,11 +753,11 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool -vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, +vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; intel_clock_t clock; unsigned int bestppm = 1000000; /* min update 19.2 MHz */ @@ -812,11 +810,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool -chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, +chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; intel_clock_t clock; uint64_t m2; int found = false; @@ -889,60 +887,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private 
*dev_priv, return intel_crtc->config.cpu_transcoder; } -static void g4x_wait_for_vblank(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe); - - frame = I915_READ(frame_reg); - - if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) - WARN(1, "vblank wait on pipe %c timed out\n", - pipe_name(pipe)); -} - -/** - * intel_wait_for_vblank - wait for vblank on a given pipe - * @dev: drm device - * @pipe: pipe to wait for - * - * Wait for vblank to occur on a given pipe. Needed for various bits of - * mode setting code. - */ -void intel_wait_for_vblank(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipestat_reg = PIPESTAT(pipe); - - if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { - g4x_wait_for_vblank(dev, pipe); - return; - } - - /* Clear existing vblank status. Note this will clear any other - * sticky status fields as well. - * - * This races with i915_driver_irq_handler() with the result - * that either function could miss a vblank event. Here it is not - * fatal, as we will either wait upon the next vblank interrupt or - * timeout. Generally speaking intel_wait_for_vblank() is only - * called during modeset at which time the GPU should be idle and - * should *not* be performing page flips and thus not waiting on - * vblanks... - * Currently, the result of us stealing a vblank from the irq - * handler is that a single frame will be skipped during swapbuffers. - */ - I915_WRITE(pipestat_reg, - I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); - - /* Wait for vblank interrupt bit to set */ - if (wait_for(I915_READ(pipestat_reg) & - PIPE_VBLANK_INTERRUPT_STATUS, - 50)) - DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n", - pipe_name(pipe)); -} - static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1189,8 +1133,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, state_string(state), state_string(cur_state)); } -static void assert_panel_unlocked(struct drm_i915_private *dev_priv, - enum pipe pipe) +void assert_panel_unlocked(struct drm_i915_private *dev_priv, + enum pipe pipe) { struct drm_device *dev = dev_priv->dev; int pp_reg; @@ -1263,7 +1207,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) state = true; - if (!intel_display_power_enabled(dev_priv, + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { cur_state = false; } else { @@ -1332,7 +1276,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, int reg, sprite; u32 val; - if (IS_VALLEYVIEW(dev)) { + if (INTEL_INFO(dev)->gen >= 9) { + for_each_sprite(pipe, sprite) { + val = I915_READ(PLANE_CTL(pipe, sprite)); + WARN(val & PLANE_CTL_ENABLE, + "plane %d assertion failure, should be off on pipe %c but is still active\n", + sprite, pipe_name(pipe)); + } + } else if (IS_VALLEYVIEW(dev)) { for_each_sprite(pipe, sprite) { reg = SPCNTR(pipe, sprite); val = I915_READ(reg); @@ -1616,7 +1567,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev) for_each_intel_crtc(dev, crtc) count += crtc->active && - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO); + intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); return count; } @@ -1695,7 +1646,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc) /* Disable DVO 2x clock on both PLLs if necessary */ if (IS_I830(dev) && - 
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) && + intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && intel_num_dvo_pipes(dev) == 1) { I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); @@ -1933,7 +1884,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_INTERLACE_MASK; if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) if (HAS_PCH_IBX(dev_priv->dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else val |= TRANS_INTERLACED; @@ -2056,7 +2007,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) * need the check. */ if (!HAS_PCH_SPLIT(dev_priv->dev)) - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -2233,7 +2184,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, switch (obj->tiling_mode) { case I915_TILING_NONE: - if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + if (INTEL_INFO(dev)->gen >= 9) + alignment = 256 * 1024; + else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) alignment = 128 * 1024; else if (INTEL_INFO(dev)->gen >= 4) alignment = 4 * 1024; @@ -2241,8 +2194,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, alignment = 64 * 1024; break; case I915_TILING_X: - /* pin() will align the object as required by fence */ - alignment = 0; + if (INTEL_INFO(dev)->gen >= 9) + alignment = 256 * 1024; + else { + /* pin() will align the object as required by fence */ + alignment = 0; + } break; case I915_TILING_Y: WARN(1, "Y tiled bo slipped through, driver bug!\n"); @@ -2402,6 +2359,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc, struct intel_plane_config *plane_config) { struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *c; struct intel_crtc *i; struct drm_i915_gem_object *obj; @@ -2433,6 +2391,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc, continue; if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { + if (obj->tiling_mode != I915_TILING_NONE) + dev_priv->preserve_bios_swizzle = true; + drm_framebuffer_reference(c->primary->fb); intel_crtc->base.primary->fb = c->primary->fb; obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); @@ -2672,6 +2633,92 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(reg); } +static void skylake_update_primary_plane(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; + int pipe = intel_crtc->pipe; + u32 plane_ctl, stride; + + if (!intel_crtc->primary_enabled) { + I915_WRITE(PLANE_CTL(pipe, 0), 0); + I915_WRITE(PLANE_SURF(pipe, 0), 0); + POSTING_READ(PLANE_CTL(pipe, 0)); + return; + } + + plane_ctl = PLANE_CTL_ENABLE | + PLANE_CTL_PIPE_GAMMA_ENABLE | + PLANE_CTL_PIPE_CSC_ENABLE; + + switch (fb->pixel_format) { + case DRM_FORMAT_RGB565: + plane_ctl |= PLANE_CTL_FORMAT_RGB_565; + break; + case DRM_FORMAT_XRGB8888: + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; + break; + case DRM_FORMAT_XBGR8888: + plane_ctl |= PLANE_CTL_ORDER_RGBX; + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; + break; + case DRM_FORMAT_XRGB2101010: + plane_ctl |= 
PLANE_CTL_FORMAT_XRGB_2101010; + break; + case DRM_FORMAT_XBGR2101010: + plane_ctl |= PLANE_CTL_ORDER_RGBX; + plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010; + break; + default: + BUG(); + } + + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + + /* + * The stride is either expressed as a multiple of 64 bytes chunks for + * linear buffers or in number of tiles for tiled buffers. + */ + switch (obj->tiling_mode) { + case I915_TILING_NONE: + stride = fb->pitches[0] >> 6; + break; + case I915_TILING_X: + plane_ctl |= PLANE_CTL_TILED_X; + stride = fb->pitches[0] >> 9; + break; + default: + BUG(); + } + + plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; + if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) + plane_ctl |= PLANE_CTL_ROTATE_180; + + I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); + + DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n", + i915_gem_obj_ggtt_offset(obj), + x, y, fb->width, fb->height, + fb->pitches[0]); + + I915_WRITE(PLANE_POS(pipe, 0), 0); + I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x); + I915_WRITE(PLANE_SIZE(pipe, 0), + (intel_crtc->config.pipe_src_h - 1) << 16 | + (intel_crtc->config.pipe_src_w - 1)); + I915_WRITE(PLANE_STRIDE(pipe, 0), stride); + I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj)); + + POSTING_READ(PLANE_SURF(pipe, 0)); +} + /* Assume fb object is pinned & idle & fenced and just update base pointers */ static int intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -2682,7 +2729,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe); dev_priv->display.update_primary_plane(crtc, fb, x, y); @@ -2762,20 +2808,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long flags; bool pending; if (i915_reset_in_progress(&dev_priv->gpu_error) || intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) return false; - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irq(&dev->event_lock); pending = to_intel_crtc(crtc)->unpin_work != NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); return pending; } +static void intel_update_pipe_size(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_display_mode *adjusted_mode; + + if (!i915.fastboot) + return; + + /* + * Update pipe size and adjust fitter if needed: the reason for this is + * that in compute_mode_changes we check the native mode (not the pfit + * mode) to see if we can flip rather than do a full mode set. In the + * fastboot case, we'll flip, but if we don't update the pipesrc and + * pfit state, we'll end up with a big fb scanned out into the wrong + * sized surface. + * + * To fix this properly, we need to hoist the checks up into + * compute_mode_changes (or above), check the actual pfit state and + * whether the platform allows pfit disable with pipe active, and only + * then update the pipesrc and pfit state, even on the flip path. 
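
[editor's note, not part of the patch: a quick sanity check of the stride units used by skylake_update_primary_plane() above, assuming a hypothetical 1920x1080 XRGB8888 framebuffer whose pitch already satisfies the tiling alignment rules]

	u32 pitch = 1920 * 4;		/* 7680 bytes per scanline */
	u32 stride_linear = pitch >> 6;	/* 120: linear strides count 64-byte chunks */
	u32 stride_xtiled = pitch >> 9;	/* 15: an X tile is 512 bytes wide, so the
					 * stride is the pitch in whole tiles */
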
+ */ + + adjusted_mode = &crtc->config.adjusted_mode; + + I915_WRITE(PIPESRC(crtc->pipe), + ((adjusted_mode->crtc_hdisplay - 1) << 16) | + (adjusted_mode->crtc_vdisplay - 1)); + if (!crtc->config.pch_pfit.enabled && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + I915_WRITE(PF_CTL(crtc->pipe), 0); + I915_WRITE(PF_WIN_POS(crtc->pipe), 0); + I915_WRITE(PF_WIN_SZ(crtc->pipe), 0); + } + crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay; + crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; +} + static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *fb) @@ -2818,36 +2902,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - /* - * Update pipe size and adjust fitter if needed: the reason for this is - * that in compute_mode_changes we check the native mode (not the pfit - * mode) to see if we can flip rather than do a full mode set. In the - * fastboot case, we'll flip, but if we don't update the pipesrc and - * pfit state, we'll end up with a big fb scanned out into the wrong - * sized surface. - * - * To fix this properly, we need to hoist the checks up into - * compute_mode_changes (or above), check the actual pfit state and - * whether the platform allows pfit disable with pipe active, and only - * then update the pipesrc and pfit state, even on the flip path. - */ - if (i915.fastboot) { - const struct drm_display_mode *adjusted_mode = - &intel_crtc->config.adjusted_mode; - - I915_WRITE(PIPESRC(intel_crtc->pipe), - ((adjusted_mode->crtc_hdisplay - 1) << 16) | - (adjusted_mode->crtc_vdisplay - 1)); - if (!intel_crtc->config.pch_pfit.enabled && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { - I915_WRITE(PF_CTL(intel_crtc->pipe), 0); - I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0); - I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0); - } - intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay; - intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; - } + intel_update_pipe_size(intel_crtc); dev_priv->display.update_primary_plane(crtc, fb, x, y); @@ -3472,14 +3527,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) !intel_crtc_has_pending_flip(crtc), 60*HZ) == 0)) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long flags; - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irq(&dev->event_lock); if (intel_crtc->unpin_work) { WARN_ONCE(1, "Removing stuck page flip\n"); page_flip_completed(intel_crtc); } - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); } if (crtc->primary->fb) { @@ -3705,8 +3759,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_EDP))) { u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); @@ -3983,7 +4037,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc) return; if (!HAS_PCH_SPLIT(dev_priv->dev)) { - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -4038,10 +4092,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) struct 
intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - assert_vblank_disabled(crtc); - - drm_vblank_on(dev, pipe); - intel_enable_primary_hw_plane(crtc->primary, crtc); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -4087,10 +4137,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) * consider this a flip to a NULL plane. */ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); - - drm_vblank_off(dev, pipe); - - assert_vblank_disabled(crtc); } static void ironlake_crtc_enable(struct drm_crtc *crtc) @@ -4123,8 +4169,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); - intel_set_pch_fifo_underrun_reporting(dev, pipe, true); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -4160,6 +4206,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (HAS_PCH_CPT(dev)) cpt_verify_modeset(dev, intel_crtc->pipe); + assert_vblank_disabled(crtc); + drm_crtc_vblank_on(crtc); + intel_crtc_enable_planes(crtc); } @@ -4235,13 +4284,14 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); if (intel_crtc->config.has_pch_encoder) { - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, + true); dev_priv->display.fdi_link_train(crtc); } @@ -4272,6 +4322,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_opregion_notify_encoder(encoder, true); } + assert_vblank_disabled(crtc); + drm_crtc_vblank_on(crtc); + /* If we change the relative order between pipe/planes enabling, we need * to change the workaround. 
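
[editor's note, not part of the patch: the hunks above move the vblank bookkeeping out of intel_crtc_enable_planes() and into each platform's crtc_enable path, so every enable path in this patch now finishes with the same sequence, assembled here from the diff]

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);
	intel_crtc_enable_planes(crtc);
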
*/ haswell_mode_set_planes_workaround(intel_crtc); @@ -4307,11 +4360,14 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_crtc_disable_planes(crtc); + drm_crtc_vblank_off(crtc); + assert_vblank_disabled(crtc); + for_each_encoder_on_crtc(dev, crtc, encoder) encoder->disable(encoder); if (intel_crtc->config.has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev, pipe, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); intel_disable_pipe(intel_crtc); @@ -4325,7 +4381,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) ironlake_fdi_disable(crtc); ironlake_disable_pch_transcoder(dev_priv, pipe); - intel_set_pch_fifo_underrun_reporting(dev, pipe, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ @@ -4369,13 +4425,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_crtc_disable_planes(crtc); + drm_crtc_vblank_off(crtc); + assert_vblank_disabled(crtc); + for_each_encoder_on_crtc(dev, crtc, encoder) { intel_opregion_notify_encoder(encoder, false); encoder->disable(encoder); } if (intel_crtc->config.has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, + false); intel_disable_pipe(intel_crtc); if (intel_crtc->config.dp_encoder_is_mst) @@ -4389,7 +4449,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) { lpt_disable_pch_transcoder(dev_priv); - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, + true); intel_ddi_fdi_disable(crtc); } @@ -4510,20 +4571,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) return mask; } -void intel_display_set_init_power(struct drm_i915_private *dev_priv, - bool enable) -{ - if (dev_priv->power_domains.init_power_on == enable) - return; - - if (enable) - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - else - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - dev_priv->power_domains.init_power_on = enable; -} - static void modeset_update_crtc_power_domains(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4577,7 +4624,7 @@ static void vlv_update_cdclk(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz", + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", dev_priv->vlv_cdclk_freq); /* @@ -4780,6 +4827,7 @@ static void valleyview_modeset_global_resources(struct drm_device *dev) static void valleyview_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -4790,7 +4838,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->active) return; - is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); + is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); if (!is_dsi) { if (IS_CHERRYVIEW(dev)) @@ -4808,7 +4856,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_pll_enable) @@ 
-4835,10 +4883,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); + assert_vblank_disabled(crtc); + drm_crtc_vblank_on(crtc); + intel_crtc_enable_planes(crtc); /* Underruns don't raise interrupts, so check manually. */ - i9xx_check_fifo_underruns(dev); + i9xx_check_fifo_underruns(dev_priv); } static void i9xx_set_pll_dividers(struct intel_crtc *crtc) @@ -4853,6 +4904,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc) static void i9xx_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -4874,7 +4926,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; if (!IS_GEN2(dev)) - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -4892,6 +4944,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); + assert_vblank_disabled(crtc); + drm_crtc_vblank_on(crtc); + intel_crtc_enable_planes(crtc); /* @@ -4902,10 +4957,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) * but leave the pipe running. */ if (IS_GEN2(dev)) - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); /* Underruns don't raise interrupts, so check manually. */ - i9xx_check_fifo_underruns(dev); + i9xx_check_fifo_underruns(dev_priv); } static void i9xx_pfit_disable(struct intel_crtc *crtc) @@ -4941,7 +4996,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) * but leave the pipe running. */ if (IS_GEN2(dev)) - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); /* * Vblank time updates from the shadow to live plane control register @@ -4955,9 +5010,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_set_memory_cxsr(dev_priv, false); intel_crtc_disable_planes(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->disable(encoder); - /* * On gen2 planes are double buffered but the pipe isn't, so we must * wait for planes to fully turn off before disabling the pipe. 
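
[editor's note, not part of the patch: taken together with the hunk that follows, the i9xx disable path now performs these steps in order (watermark and cxsr handling elided) — planes off first, then a vblank wait so gen2's double-buffered plane registers latch, and only then the vblank/encoder/pipe teardown]

	intel_crtc_disable_planes(crtc);
	intel_wait_for_vblank(dev, pipe);
	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);
	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);
	intel_disable_pipe(intel_crtc);
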
@@ -4966,6 +5018,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) */ intel_wait_for_vblank(dev, pipe); + drm_crtc_vblank_off(crtc); + assert_vblank_disabled(crtc); + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->disable(encoder); + intel_disable_pipe(intel_crtc); i9xx_pfit_disable(intel_crtc); @@ -4974,7 +5032,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) { + if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev)) chv_disable_pll(dev_priv, pipe); else if (IS_VALLEYVIEW(dev)) @@ -4984,7 +5042,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) } if (!IS_GEN2(dev)) - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_crtc->active = false; intel_update_watermarks(crtc); @@ -5357,7 +5415,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * - LVDS dual channel mode * - Double wide pipe */ - if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) pipe_config->pipe_src_w &= ~1; @@ -5545,9 +5603,9 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } -static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) +static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int refclk; @@ -5595,7 +5653,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, crtc->config.dpll_hw_state.fp0 = fp; crtc->lowfreq_avail = false; - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && reduced_clock && i915.powersave) { crtc->config.dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; @@ -5764,16 +5822,16 @@ static void vlv_prepare_pll(struct intel_crtc *crtc) /* Set HBR and RBR LPF coefficients */ if (crtc->config.port_clock == 162000 || - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) + intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x009f0003); else vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x00d0000f); - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { /* Use SSC source */ if (pipe == PIPE_A) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), @@ -5793,8 +5851,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc) coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) coreclk |= 0x01000000; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); @@ -5864,7 +5922,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc) (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); /* Loop filter */ - refclk = 
i9xx_get_refclk(&crtc->base, 0); + refclk = i9xx_get_refclk(crtc, 0); loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | 2 << DPIO_CHV_GAIN_CTRL_SHIFT; if (refclk == 100000) @@ -5896,12 +5954,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc, i9xx_update_pll_dividers(crtc, reduced_clock); - is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); + is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; @@ -5914,7 +5972,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc, if (is_sdvo) dpll |= DPLL_SDVO_HIGH_SPEED; - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ -5944,7 +6002,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc, if (crtc->config.sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; - else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -5973,7 +6031,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; } else { if (clock->p1 == 2) @@ -5984,10 +6042,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) + if (!IS_I830(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -6018,7 +6076,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) crtc_vtotal -= 1; crtc_vblank_end -= 1; - if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; else vsyncshift = adjusted_mode->crtc_hsync_start - @@ -6176,7 +6234,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (INTEL_INFO(dev)->gen < 4 || - intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; @@ -6190,13 +6248,12 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) POSTING_READ(PIPECONF(intel_crtc->pipe)); } -static int i9xx_crtc_mode_set(struct drm_crtc *crtc, +static int i9xx_crtc_mode_set(struct intel_crtc *crtc, int x, int y, struct drm_framebuffer *fb) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; bool ok, has_reduced_clock = false; @@ -6204,7 +6261,7 @@ 
static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct intel_encoder *encoder; const intel_limit_t *limit; - for_each_encoder_on_crtc(dev, crtc, encoder) { + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -6220,7 +6277,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, if (is_dsi) return 0; - if (!intel_crtc->config.clock_set) { + if (!crtc->config.clock_set) { refclk = i9xx_get_refclk(crtc, num_connectors); /* @@ -6231,7 +6288,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, */ limit = intel_limit(crtc, refclk); ok = dev_priv->display.find_dpll(limit, crtc, - intel_crtc->config.port_clock, + crtc->config.port_clock, refclk, NULL, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); @@ -6252,23 +6309,23 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, &reduced_clock); } /* Compat-code for transition, will disappear. */ - intel_crtc->config.dpll.n = clock.n; - intel_crtc->config.dpll.m1 = clock.m1; - intel_crtc->config.dpll.m2 = clock.m2; - intel_crtc->config.dpll.p1 = clock.p1; - intel_crtc->config.dpll.p2 = clock.p2; + crtc->config.dpll.n = clock.n; + crtc->config.dpll.m1 = clock.m1; + crtc->config.dpll.m2 = clock.m2; + crtc->config.dpll.p1 = clock.p1; + crtc->config.dpll.p2 = clock.p2; } if (IS_GEN2(dev)) { - i8xx_update_pll(intel_crtc, + i8xx_update_pll(crtc, has_reduced_clock ? &reduced_clock : NULL, num_connectors); } else if (IS_CHERRYVIEW(dev)) { - chv_update_pll(intel_crtc); + chv_update_pll(crtc); } else if (IS_VALLEYVIEW(dev)) { - vlv_update_pll(intel_crtc); + vlv_update_pll(crtc); } else { - i9xx_update_pll(intel_crtc, + i9xx_update_pll(crtc, has_reduced_clock ? &reduced_clock : NULL, num_connectors); } @@ -6434,8 +6491,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; - if (!intel_display_power_enabled(dev_priv, - POWER_DOMAIN_PIPE(crtc->pipe))) + if (!intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(crtc->pipe))) return false; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; @@ -7021,7 +7078,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) { val = 0; switch (intel_crtc->config.pipe_bpp) { @@ -7056,18 +7113,12 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int refclk; const intel_limit_t *limit; bool ret, is_lvds = false; - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - switch (intel_encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - } - } + is_lvds = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_LVDS); refclk = ironlake_get_refclk(crtc); @@ -7076,9 +7127,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, * refclk, or FALSE. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
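 *
 * [editor's worked example, not part of the patch — illustrative values
 * only, not taken from any platform's limit table: with refclk = 120000 kHz,
 * m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 5 the equation gives
 * 120000 * (5 * (12 + 2) + (9 + 2)) / (3 + 2) / 2 / 5
 * = 120000 * 81 / 5 / 2 / 5 = 194400 kHz.]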
*/ - limit = intel_limit(crtc, refclk); - ret = dev_priv->display.find_dpll(limit, crtc, - to_intel_crtc(crtc)->config.port_clock, + limit = intel_limit(intel_crtc, refclk); + ret = dev_priv->display.find_dpll(limit, intel_crtc, + intel_crtc->config.port_clock, refclk, NULL, clock); if (!ret) return false; @@ -7091,7 +7142,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, * downclock feature. */ *has_reduced_clock = - dev_priv->display.find_dpll(limit, crtc, + dev_priv->display.find_dpll(limit, intel_crtc, dev_priv->lvds_downclock, refclk, clock, reduced_clock); @@ -7201,78 +7252,67 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, return dpll | DPLL_VCO_ENABLE; } -static int ironlake_crtc_mode_set(struct drm_crtc *crtc, +static int ironlake_crtc_mode_set(struct intel_crtc *crtc, int x, int y, struct drm_framebuffer *fb) { - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int num_connectors = 0; + struct drm_device *dev = crtc->base.dev; intel_clock_t clock, reduced_clock; u32 dpll = 0, fp = 0, fp2 = 0; bool ok, has_reduced_clock = false; bool is_lvds = false; - struct intel_encoder *encoder; struct intel_shared_dpll *pll; - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - } - - num_connectors++; - } + is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS); WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); - ok = ironlake_compute_clocks(crtc, &clock, + ok = ironlake_compute_clocks(&crtc->base, &clock, &has_reduced_clock, &reduced_clock); - if (!ok && !intel_crtc->config.clock_set) { + if (!ok && !crtc->config.clock_set) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; } /* Compat-code for transition, will disappear. */ - if (!intel_crtc->config.clock_set) { - intel_crtc->config.dpll.n = clock.n; - intel_crtc->config.dpll.m1 = clock.m1; - intel_crtc->config.dpll.m2 = clock.m2; - intel_crtc->config.dpll.p1 = clock.p1; - intel_crtc->config.dpll.p2 = clock.p2; + if (!crtc->config.clock_set) { + crtc->config.dpll.n = clock.n; + crtc->config.dpll.m1 = clock.m1; + crtc->config.dpll.m2 = clock.m2; + crtc->config.dpll.p1 = clock.p1; + crtc->config.dpll.p2 = clock.p2; } /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ - if (intel_crtc->config.has_pch_encoder) { - fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); + if (crtc->config.has_pch_encoder) { + fp = i9xx_dpll_compute_fp(&crtc->config.dpll); if (has_reduced_clock) fp2 = i9xx_dpll_compute_fp(&reduced_clock); - dpll = ironlake_compute_dpll(intel_crtc, + dpll = ironlake_compute_dpll(crtc, &fp, &reduced_clock, has_reduced_clock ? 
&fp2 : NULL); - intel_crtc->config.dpll_hw_state.dpll = dpll; - intel_crtc->config.dpll_hw_state.fp0 = fp; + crtc->config.dpll_hw_state.dpll = dpll; + crtc->config.dpll_hw_state.fp0 = fp; if (has_reduced_clock) - intel_crtc->config.dpll_hw_state.fp1 = fp2; + crtc->config.dpll_hw_state.fp1 = fp2; else - intel_crtc->config.dpll_hw_state.fp1 = fp; + crtc->config.dpll_hw_state.fp1 = fp; - pll = intel_get_shared_dpll(intel_crtc); + pll = intel_get_shared_dpll(crtc); if (pll == NULL) { DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); return -EINVAL; } } else - intel_put_shared_dpll(intel_crtc); + intel_put_shared_dpll(crtc); if (is_lvds && has_reduced_clock && i915.powersave) - intel_crtc->lowfreq_avail = true; + crtc->lowfreq_avail = true; else - intel_crtc->lowfreq_avail = false; + crtc->lowfreq_avail = false; return 0; } @@ -7444,8 +7484,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; - if (!intel_display_power_enabled(dev_priv, - POWER_DOMAIN_PIPE(crtc->pipe))) + if (!intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(crtc->pipe))) return false; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; @@ -7638,7 +7678,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { uint32_t val; - unsigned long irqflags; val = I915_READ(LCPLL_CTL); @@ -7658,10 +7697,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) * to call special forcewake code that doesn't touch runtime PM and * doesn't enable the forcewake delayed work. */ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + spin_lock_irq(&dev_priv->uncore.lock); if (dev_priv->uncore.forcewake_count++ == 0) dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irq(&dev_priv->uncore.lock); if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; @@ -7692,10 +7731,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) } /* See the big comment above. */ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + spin_lock_irq(&dev_priv->uncore.lock); if (--dev_priv->uncore.forcewake_count == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irq(&dev_priv->uncore.lock); } /* @@ -7767,16 +7806,14 @@ static void haswell_modeset_global_resources(struct drm_device *dev) modeset_update_crtc_power_domains(dev); } -static int haswell_crtc_mode_set(struct drm_crtc *crtc, +static int haswell_crtc_mode_set(struct intel_crtc *crtc, int x, int y, struct drm_framebuffer *fb) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - if (!intel_ddi_pll_select(intel_crtc)) + if (!intel_ddi_pll_select(crtc)) return -EINVAL; - intel_crtc->lowfreq_avail = false; + crtc->lowfreq_avail = false; return 0; } @@ -7824,7 +7861,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, * DDI E. So just check whether this pipe is wired to DDI E and whether * the PCH transcoder is on. 
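 *
 * [editor's note, not part of the patch: the gen < 9 guard added below is
 * consistent with the rest of this series — gen9 drops the FDI link, so
 * DDI E can no longer be wired to a PCH transcoder here; compare
 * intel_crt_present() later in this patch, which now returns false for
 * gen >= 9.]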
*/ - if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { + if (INTEL_INFO(dev)->gen < 9 && + (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { pipe_config->has_pch_encoder = true; tmp = I915_READ(FDI_RX_CTL(PIPE_A)); @@ -7843,7 +7881,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, enum intel_display_power_domain pfit_domain; uint32_t tmp; - if (!intel_display_power_enabled(dev_priv, + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(crtc->pipe))) return false; @@ -7872,7 +7910,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->cpu_transcoder = TRANSCODER_EDP; } - if (!intel_display_power_enabled(dev_priv, + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) return false; @@ -7885,7 +7923,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_timings(crtc, pipe_config); pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); - if (intel_display_power_enabled(dev_priv, pfit_domain)) + if (intel_display_power_is_enabled(dev_priv, pfit_domain)) ironlake_get_pfit_config(crtc, pipe_config); if (IS_HASWELL(dev)) @@ -8015,6 +8053,7 @@ static void haswell_write_eld(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint8_t *eld = connector->eld; uint32_t eldv; uint32_t i; @@ -8055,7 +8094,7 @@ static void haswell_write_eld(struct drm_connector *connector, eldv = AUDIO_ELD_VALID_A << (pipe * 4); - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) { DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ @@ -8098,6 +8137,7 @@ static void ironlake_write_eld(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint8_t *eld = connector->eld; uint32_t eldv; uint32_t i; @@ -8151,7 +8191,7 @@ static void ironlake_write_eld(struct drm_connector *connector, eldv = IBX_ELD_VALIDB << ((i - 1) * 4); } - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) { DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ @@ -8255,8 +8295,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) intel_crtc->cursor_cntl = 0; } - if (intel_crtc->cursor_base != base) + if (intel_crtc->cursor_base != base) { I915_WRITE(_CURABASE, base); + intel_crtc->cursor_base = base; + } if (intel_crtc->cursor_size != size) { I915_WRITE(CURSIZE, size); @@ -8296,9 +8338,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) return; } cntl |= pipe << 28; /* Connect to correct pipe */ + + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + cntl |= CURSOR_PIPE_CSC_ENABLE; } - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - cntl |= CURSOR_PIPE_CSC_ENABLE; + + if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) + cntl |= CURSOR_ROTATE_180; if (intel_crtc->cursor_cntl != cntl) { I915_WRITE(CURCNTR(pipe), cntl); @@ -8309,6 +8355,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) /* and commit changes on next 
vblank */ I915_WRITE(CURBASE(pipe), base); POSTING_READ(CURBASE(pipe)); + + intel_crtc->cursor_base = base; } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ @@ -8355,11 +8403,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, I915_WRITE(CURPOS(pipe), pos); + /* ILK+ do this automagically */ + if (HAS_GMCH_DISPLAY(dev) && + to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) { + base += (intel_crtc->cursor_height * + intel_crtc->cursor_width - 1) * 4; + } + if (IS_845G(dev) || IS_I865G(dev)) i845_update_cursor(crtc, base); else i9xx_update_cursor(crtc, base); - intel_crtc->cursor_base = base; } static bool cursor_size_ok(struct drm_device *dev, @@ -8399,13 +8453,6 @@ static bool cursor_size_ok(struct drm_device *dev, return true; } -/* - * intel_crtc_cursor_set_obj - Set cursor to specified GEM object - * - * Note that the object's reference will be consumed if the update fails. If - * the update succeeds, the reference of the old object (if any) will be - * consumed. - */ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, struct drm_i915_gem_object *obj, uint32_t width, uint32_t height) @@ -8414,7 +8461,7 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; - unsigned old_width, stride; + unsigned old_width; uint32_t addr; int ret; @@ -8426,30 +8473,11 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, goto finish; } - /* Check for which cursor types we support */ - if (!cursor_size_ok(dev, width, height)) { - DRM_DEBUG("Cursor dimension not supported\n"); - return -EINVAL; - } - - stride = roundup_pow_of_two(width) * 4; - if (obj->base.size < stride * height) { - DRM_DEBUG_KMS("buffer is too small\n"); - ret = -ENOMEM; - goto fail; - } - /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!INTEL_INFO(dev)->cursor_needs_physical) { unsigned alignment; - if (obj->tiling_mode) { - DRM_DEBUG_KMS("cursor cannot be tiled\n"); - ret = -EINVAL; - goto fail_locked; - } - /* * Global gtt pte registers are special registers which actually * forward writes to a chunk of system memory. 
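
[editor's note, not part of the patch: the rotated-cursor base adjustment in intel_crtc_update_cursor() above, worked through for a hypothetical 64x64 ARGB cursor — a 180 degree rotation on GMCH platforms scans the buffer backwards, so the base must point at the last pixel]

	base += (64 * 64 - 1) * 4;	/* 16380 bytes in: last 4-byte pixel of the cursor image */
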
Which means that @@ -8525,8 +8553,6 @@ fail_unpin: i915_gem_object_unpin_from_display_plane(obj); fail_locked: mutex_unlock(&dev->struct_mutex); -fail: - drm_gem_object_unreference_unlocked(&obj->base); return ret; } @@ -9023,35 +9049,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -static void intel_increase_pllclock(struct drm_device *dev, - enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int dpll_reg = DPLL(pipe); - int dpll; - - if (!HAS_GMCH_DISPLAY(dev)) - return; - - if (!dev_priv->lvds_downclock_avail) - return; - - dpll = I915_READ(dpll_reg); - if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { - DRM_DEBUG_DRIVER("upclocking LVDS\n"); - - assert_panel_unlocked(dev_priv, pipe); - - dpll &= ~DISPLAY_RATE_SELECT_FPA1; - I915_WRITE(dpll_reg, dpll); - intel_wait_for_vblank(dev, pipe); - - dpll = I915_READ(dpll_reg); - if (dpll & DISPLAY_RATE_SELECT_FPA1) - DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); - } -} - static void intel_decrease_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -9127,199 +9124,16 @@ out: intel_runtime_pm_put(dev_priv); } - -/** - * intel_mark_fb_busy - mark given planes as busy - * @dev: DRM device - * @frontbuffer_bits: bits for the affected planes - * @ring: optional ring for asynchronous commands - * - * This function gets called every time the screen contents change. It can be - * used to keep e.g. the update rate at the nominal refresh rate with DRRS. - */ -static void intel_mark_fb_busy(struct drm_device *dev, - unsigned frontbuffer_bits, - struct intel_engine_cs *ring) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - - if (!i915.powersave) - return; - - for_each_pipe(dev_priv, pipe) { - if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) - continue; - - intel_increase_pllclock(dev, pipe); - if (ring && intel_fbc_enabled(dev)) - ring->fbc_dirty = true; - } -} - -/** - * intel_fb_obj_invalidate - invalidate frontbuffer object - * @obj: GEM object to invalidate - * @ring: set for asynchronous rendering - * - * This function gets called every time rendering on the given object starts and - * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must - * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed - * until the rendering completes or a flip on this frontbuffer plane is - * scheduled. - */ -void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring) -{ - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (!obj->frontbuffer_bits) - return; - - if (ring) { - mutex_lock(&dev_priv->fb_tracking.lock); - dev_priv->fb_tracking.busy_bits - |= obj->frontbuffer_bits; - dev_priv->fb_tracking.flip_bits - &= ~obj->frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); - } - - intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); - - intel_edp_psr_invalidate(dev, obj->frontbuffer_bits); -} - -/** - * intel_frontbuffer_flush - flush frontbuffer - * @dev: DRM device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called every time rendering on the given planes has - * completed and frontbuffer caching can be started again. Flushes will get - * delayed if they're blocked by some oustanding asynchronous rendering. - * - * Can be called without any locks held. 
- */ -void intel_frontbuffer_flush(struct drm_device *dev, - unsigned frontbuffer_bits) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* Delay flushing when rings are still busy.*/ - mutex_lock(&dev_priv->fb_tracking.lock); - frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); - - intel_mark_fb_busy(dev, frontbuffer_bits, NULL); - - intel_edp_psr_flush(dev, frontbuffer_bits); - - /* - * FIXME: Unconditional fbc flushing here is a rather gross hack and - * needs to be reworked into a proper frontbuffer tracking scheme like - * psr employs. - */ - if (IS_BROADWELL(dev)) - gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN); -} - -/** - * intel_fb_obj_flush - flush frontbuffer object - * @obj: GEM object to flush - * @retire: set when retiring asynchronous rendering - * - * This function gets called every time rendering on the given object has - * completed and frontbuffer caching can be started again. If @retire is true - * then any delayed flushes will be unblocked. - */ -void intel_fb_obj_flush(struct drm_i915_gem_object *obj, - bool retire) -{ - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned frontbuffer_bits; - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (!obj->frontbuffer_bits) - return; - - frontbuffer_bits = obj->frontbuffer_bits; - - if (retire) { - mutex_lock(&dev_priv->fb_tracking.lock); - /* Filter out new bits since rendering started. */ - frontbuffer_bits &= dev_priv->fb_tracking.busy_bits; - - dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); - } - - intel_frontbuffer_flush(dev, frontbuffer_bits); -} - -/** - * intel_frontbuffer_flip_prepare - prepare asnychronous frontbuffer flip - * @dev: DRM device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called after scheduling a flip on @obj. The actual - * frontbuffer flushing will be delayed until completion is signalled with - * intel_frontbuffer_flip_complete. If an invalidate happens in between this - * flush will be cancelled. - * - * Can be called without any locks held. - */ -void intel_frontbuffer_flip_prepare(struct drm_device *dev, - unsigned frontbuffer_bits) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - mutex_lock(&dev_priv->fb_tracking.lock); - dev_priv->fb_tracking.flip_bits - |= frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); -} - -/** - * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush - * @dev: DRM device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called after the flip has been latched and will complete - * on the next vblank. It will execute the fush if it hasn't been cancalled yet. - * - * Can be called without any locks held. - */ -void intel_frontbuffer_flip_complete(struct drm_device *dev, - unsigned frontbuffer_bits) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - mutex_lock(&dev_priv->fb_tracking.lock); - /* Mask any cancelled flips. 
*/ - frontbuffer_bits &= dev_priv->fb_tracking.flip_bits; - dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); - - intel_frontbuffer_flush(dev, frontbuffer_bits); -} - static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; struct intel_unpin_work *work; - unsigned long flags; - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irq(&dev->event_lock); work = intel_crtc->unpin_work; intel_crtc->unpin_work = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); if (work) { cancel_work_sync(&work->work); @@ -9365,6 +9179,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, if (intel_crtc == NULL) return; + /* + * This is called both by irq handlers and the reset code (to complete + * lost pageflips) so needs the full irqsave spinlocks. + */ spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; @@ -9446,7 +9264,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); unsigned long flags; - /* NB: An MMIO update of the plane base pointer will also + + /* + * This is called both by irq handlers and the reset code (to complete + * lost pageflips) so needs the full irqsave spinlocks. + * + * NB: An MMIO update of the plane base pointer will also * generate a page-flip completion irq, i.e. every modeset * is also accompanied by a spurious intel_prepare_page_flip(). */ @@ -9821,7 +9644,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long irq_flags; int ret; if (WARN_ON(intel_crtc->mmio_flip.seqno)) @@ -9835,10 +9657,10 @@ static int intel_queue_mmio_flip(struct drm_device *dev, return 0; } - spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); + spin_lock_irq(&dev_priv->mmio_flip_lock); intel_crtc->mmio_flip.seqno = obj->last_write_seqno; intel_crtc->mmio_flip.ring_id = obj->ring->id; - spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); + spin_unlock_irq(&dev_priv->mmio_flip_lock); /* * Double check to catch cases where irq fired before @@ -9903,18 +9725,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long flags; + + WARN_ON(!in_irq()); if (crtc == NULL) return; - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock(&dev->event_lock); if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) { WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe)); page_flip_completed(intel_crtc); } - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock(&dev->event_lock); } static int intel_crtc_page_flip(struct drm_crtc *crtc, @@ -9930,7 +9753,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, enum pipe pipe = intel_crtc->pipe; struct intel_unpin_work *work; struct intel_engine_cs *ring; - unsigned long flags; int ret; /* @@ -9971,7 +9793,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto free_work; /* We borrow the event spin lock for protecting unpin_work */ - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irq(&dev->event_lock); if 
(intel_crtc->unpin_work) { /* Before declaring the flip queue wedged, check if * the hardware completed the operation behind our backs. @@ -9981,7 +9803,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, page_flip_completed(intel_crtc); } else { DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); kfree(work); @@ -9989,7 +9811,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, } } intel_crtc->unpin_work = work; - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); if (atomic_read(&intel_crtc->unpin_work_count) >= 2) flush_workqueue(dev_priv->wq); @@ -10076,9 +9898,9 @@ cleanup_pending: mutex_unlock(&dev->struct_mutex); cleanup: - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irq(&dev->event_lock); intel_crtc->unpin_work = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); free_work: @@ -10089,9 +9911,9 @@ out_hang: intel_crtc_wait_for_pending_flips(crtc); ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); if (ret == 0 && event) { - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irq(&dev->event_lock); drm_send_vblank_event(dev, pipe, event); - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irq(&dev->event_lock); } } return ret; @@ -11074,7 +10896,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) crtc->scanline_offset = vtotal - 1; } else if (HAS_DDI(dev) && - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) { + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { crtc->scanline_offset = 2; } else crtc->scanline_offset = 1; @@ -11194,8 +11016,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, crtc->x = x; crtc->y = y; - ret = dev_priv->display.crtc_mode_set(&intel_crtc->base, - x, y, fb); + ret = dev_priv->display.crtc_mode_set(intel_crtc, x, y, fb); if (ret) goto done; } @@ -11677,7 +11498,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, { uint32_t val; - if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; val = I915_READ(PCH_DPLL(pll->id)); @@ -11811,161 +11632,189 @@ disable_unpin: } static int -intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +intel_check_primary_plane(struct drm_plane *plane, + struct intel_plane_state *state) +{ + struct drm_crtc *crtc = state->crtc; + struct drm_framebuffer *fb = state->fb; + struct drm_rect *dest = &state->dst; + struct drm_rect *src = &state->src; + const struct drm_rect *clip = &state->clip; + int ret; + + ret = drm_plane_helper_check_update(plane, crtc, fb, + src, dest, clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true, &state->visible); + if (ret) + return ret; + + /* no fb bound */ + if (state->visible && !fb) { + DRM_ERROR("No FB bound\n"); + return -EINVAL; + } + + return 0; +} + +static int +intel_commit_primary_plane(struct drm_plane *plane, + struct intel_plane_state *state) { + struct drm_crtc *crtc = state->crtc; + struct drm_framebuffer *fb = state->fb; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe 
pipe = intel_crtc->pipe; + struct drm_framebuffer *old_fb = plane->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); - struct drm_rect dest = { - /* integer pixels */ - .x1 = crtc_x, - .y1 = crtc_y, - .x2 = crtc_x + crtc_w, - .y2 = crtc_y + crtc_h, - }; - struct drm_rect src = { - /* 16.16 fixed point */ - .x1 = src_x, - .y1 = src_y, - .x2 = src_x + src_w, - .y2 = src_y + src_h, - }; - const struct drm_rect clip = { - /* integer pixels */ - .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, - .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, - }; - const struct { - int crtc_x, crtc_y; - unsigned int crtc_w, crtc_h; - uint32_t src_x, src_y, src_w, src_h; - } orig = { - .crtc_x = crtc_x, - .crtc_y = crtc_y, - .crtc_w = crtc_w, - .crtc_h = crtc_h, - .src_x = src_x, - .src_y = src_y, - .src_w = src_w, - .src_h = src_h, - }; struct intel_plane *intel_plane = to_intel_plane(plane); - bool visible; + struct drm_rect *src = &state->src; int ret; - ret = drm_plane_helper_check_update(plane, crtc, fb, - &src, &dest, &clip, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - false, true, &visible); + intel_crtc_wait_for_pending_flips(crtc); - if (ret) - return ret; + if (intel_crtc_has_pending_flip(crtc)) { + DRM_ERROR("pipe is still busy with an old pageflip\n"); + return -EBUSY; + } - /* - * If the CRTC isn't enabled, we're just pinning the framebuffer, - * updating the fb pointer, and returning without touching the - * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to - * turn on the display with all planes setup as desired. - */ - if (!crtc->enabled) { + if (plane->fb != fb) { mutex_lock(&dev->struct_mutex); - - /* - * If we already called setplane while the crtc was disabled, - * we may have an fb pinned; unpin it. - */ - if (plane->fb) - intel_unpin_fb_obj(old_obj); - - i915_gem_track_fb(old_obj, obj, - INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); - - /* Pin and return without programming hardware */ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + if (ret == 0) + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(pipe)); mutex_unlock(&dev->struct_mutex); - - return ret; + if (ret != 0) { + DRM_DEBUG_KMS("pin & fence failed\n"); + return ret; + } } - intel_crtc_wait_for_pending_flips(crtc); - - /* - * If clipping results in a non-visible primary plane, we'll disable - * the primary plane. Note that this is a bit different than what - * happens if userspace explicitly disables the plane by passing fb=0 - * because plane->fb still gets set and pinned. - */ - if (!visible) { - mutex_lock(&dev->struct_mutex); + crtc->primary->fb = fb; + crtc->x = src->x1; + crtc->y = src->y1; + + intel_plane->crtc_x = state->orig_dst.x1; + intel_plane->crtc_y = state->orig_dst.y1; + intel_plane->crtc_w = drm_rect_width(&state->orig_dst); + intel_plane->crtc_h = drm_rect_height(&state->orig_dst); + intel_plane->src_x = state->orig_src.x1; + intel_plane->src_y = state->orig_src.y1; + intel_plane->src_w = drm_rect_width(&state->orig_src); + intel_plane->src_h = drm_rect_height(&state->orig_src); + intel_plane->obj = obj; + if (intel_crtc->active) { /* - * Try to pin the new fb first so that we can bail out if we - * fail. + * FBC does not work on some platforms for rotated + * planes, so disable it when rotation is not 0 and + * update it when rotation is set back to 0. 
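 *
 * [editor's note, not part of the patch: on these gen2-4 GMCH platforms
 * FBC tracks a single plane and does not cope with a rotated scanout,
 * which is why the condition below drops out of FBC whenever the plane's
 * rotation is anything other than DRM_ROTATE_0.]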
+ * + * FIXME: This is redundant with the fbc update done in + * the primary plane enable function except that that + * one is done too late. We eventually need to unify + * this. */ - if (plane->fb != fb) { - ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (intel_crtc->primary_enabled && + INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && + dev_priv->fbc.plane == intel_crtc->plane && + intel_plane->rotation != BIT(DRM_ROTATE_0)) { + intel_disable_fbc(dev); } - i915_gem_track_fb(old_obj, obj, - INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); - - if (intel_crtc->primary_enabled) - intel_disable_primary_hw_plane(plane, crtc); + if (state->visible) { + bool was_enabled = intel_crtc->primary_enabled; + /* FIXME: kill this fastboot hack */ + intel_update_pipe_size(intel_crtc); - if (plane->fb != fb) - if (plane->fb) - intel_unpin_fb_obj(old_obj); + intel_crtc->primary_enabled = true; - mutex_unlock(&dev->struct_mutex); + dev_priv->display.update_primary_plane(crtc, plane->fb, + crtc->x, crtc->y); - } else { - if (intel_crtc && intel_crtc->active && - intel_crtc->primary_enabled) { /* - * FBC does not work on some platforms for rotated - * planes, so disable it when rotation is not 0 and - * update it when rotation is set back to 0. - * - * FIXME: This is redundant with the fbc update done in - * the primary plane enable function except that that - * one is done too late. We eventually need to unify - * this. + * BDW signals flip done immediately if the plane + * is disabled, even if the plane enable is already + * armed to occur at the next vblank :( */ - if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && - dev_priv->fbc.plane == intel_crtc->plane && - intel_plane->rotation != BIT(DRM_ROTATE_0)) { - intel_disable_fbc(dev); - } + if (IS_BROADWELL(dev) && !was_enabled) + intel_wait_for_vblank(dev, intel_crtc->pipe); + } else { + /* + * If clipping results in a non-visible primary plane, + * we'll disable the primary plane. Note that this is + * a bit different than what happens if userspace + * explicitly disables the plane by passing fb=0 + * because plane->fb still gets set and pinned. 
+ */ + intel_disable_primary_hw_plane(plane, crtc); } - ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); - if (ret) - return ret; - if (!intel_crtc->primary_enabled) - intel_enable_primary_hw_plane(plane, crtc); + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); + + mutex_lock(&dev->struct_mutex); + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); } - intel_plane->crtc_x = orig.crtc_x; - intel_plane->crtc_y = orig.crtc_y; - intel_plane->crtc_w = orig.crtc_w; - intel_plane->crtc_h = orig.crtc_h; - intel_plane->src_x = orig.src_x; - intel_plane->src_y = orig.src_y; - intel_plane->src_w = orig.src_w; - intel_plane->src_h = orig.src_h; - intel_plane->obj = obj; + if (old_fb && old_fb != fb) { + if (intel_crtc->active) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + mutex_lock(&dev->struct_mutex); + intel_unpin_fb_obj(old_obj); + mutex_unlock(&dev->struct_mutex); + } + + return 0; +} + +static int +intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct intel_plane_state state; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int ret; + + state.crtc = crtc; + state.fb = fb; + + /* sample coordinates in 16.16 fixed point */ + state.src.x1 = src_x; + state.src.x2 = src_x + src_w; + state.src.y1 = src_y; + state.src.y2 = src_y + src_h; + + /* integer pixels */ + state.dst.x1 = crtc_x; + state.dst.x2 = crtc_x + crtc_w; + state.dst.y1 = crtc_y; + state.dst.y2 = crtc_y + crtc_h; + + state.clip.x1 = 0; + state.clip.y1 = 0; + state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0; + state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0; + + state.orig_src = state.src; + state.orig_dst = state.dst; + + ret = intel_check_primary_plane(plane, &state); + if (ret) + return ret; + + intel_commit_primary_plane(plane, &state); return 0; } @@ -12044,51 +11893,80 @@ intel_cursor_plane_disable(struct drm_plane *plane) } static int -intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +intel_check_cursor_plane(struct drm_plane *plane, + struct intel_plane_state *state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; - struct drm_rect dest = { - /* integer pixels */ - .x1 = crtc_x, - .y1 = crtc_y, - .x2 = crtc_x + crtc_w, - .y2 = crtc_y + crtc_h, - }; - struct drm_rect src = { - /* 16.16 fixed point */ - .x1 = src_x, - .y1 = src_y, - .x2 = src_x + src_w, - .y2 = src_y + src_h, - }; - const struct drm_rect clip = { - /* integer pixels */ - .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, - .y2 = intel_crtc->active ? 
intel_crtc->config.pipe_src_h : 0, - }; - bool visible; + struct drm_crtc *crtc = state->crtc; + struct drm_device *dev = crtc->dev; + struct drm_framebuffer *fb = state->fb; + struct drm_rect *dest = &state->dst; + struct drm_rect *src = &state->src; + const struct drm_rect *clip = &state->clip; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + int crtc_w, crtc_h; + unsigned stride; int ret; ret = drm_plane_helper_check_update(plane, crtc, fb, - &src, &dest, &clip, + src, dest, clip, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, - true, true, &visible); + true, true, &state->visible); if (ret) return ret; - crtc->cursor_x = crtc_x; - crtc->cursor_y = crtc_y; + + /* if we want to turn off the cursor ignore width and height */ + if (!obj) + return 0; + + /* Check for which cursor types we support */ + crtc_w = drm_rect_width(&state->orig_dst); + crtc_h = drm_rect_height(&state->orig_dst); + if (!cursor_size_ok(dev, crtc_w, crtc_h)) { + DRM_DEBUG("Cursor dimension not supported\n"); + return -EINVAL; + } + + stride = roundup_pow_of_two(crtc_w) * 4; + if (obj->base.size < stride * crtc_h) { + DRM_DEBUG_KMS("buffer is too small\n"); + return -ENOMEM; + } + + if (fb == crtc->cursor->fb) + return 0; + + /* we only need to pin inside GTT if cursor is non-phy */ + mutex_lock(&dev->struct_mutex); + if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { + DRM_DEBUG_KMS("cursor cannot be tiled\n"); + ret = -EINVAL; + } + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static int +intel_commit_cursor_plane(struct drm_plane *plane, + struct intel_plane_state *state) +{ + struct drm_crtc *crtc = state->crtc; + struct drm_framebuffer *fb = state->fb; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + int crtc_w, crtc_h; + + crtc->cursor_x = state->orig_dst.x1; + crtc->cursor_y = state->orig_dst.y1; if (fb != crtc->cursor->fb) { + crtc_w = drm_rect_width(&state->orig_dst); + crtc_h = drm_rect_height(&state->orig_dst); return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); } else { - intel_crtc_update_cursor(crtc, visible); + intel_crtc_update_cursor(crtc, state->visible); intel_frontbuffer_flip(crtc->dev, INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe)); @@ -12096,10 +11974,53 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, return 0; } } + +static int +intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_plane_state state; + int ret; + + state.crtc = crtc; + state.fb = fb; + + /* sample coordinates in 16.16 fixed point */ + state.src.x1 = src_x; + state.src.x2 = src_x + src_w; + state.src.y1 = src_y; + state.src.y2 = src_y + src_h; + + /* integer pixels */ + state.dst.x1 = crtc_x; + state.dst.x2 = crtc_x + crtc_w; + state.dst.y1 = crtc_y; + state.dst.y2 = crtc_y + crtc_h; + + state.clip.x1 = 0; + state.clip.y1 = 0; + state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0; + state.clip.y2 = intel_crtc->active ? 
intel_crtc->config.pipe_src_h : 0; + + state.orig_src = state.src; + state.orig_dst = state.dst; + + ret = intel_check_cursor_plane(plane, &state); + if (ret) + return ret; + + return intel_commit_cursor_plane(plane, &state); +} + static const struct drm_plane_funcs intel_cursor_plane_funcs = { .update_plane = intel_cursor_plane_update, .disable_plane = intel_cursor_plane_disable, .destroy = intel_plane_destroy, + .set_property = intel_plane_set_property, }; static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, @@ -12115,12 +12036,26 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, cursor->max_downscale = 1; cursor->pipe = pipe; cursor->plane = pipe; + cursor->rotation = BIT(DRM_ROTATE_0); drm_universal_plane_init(dev, &cursor->base, 0, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), DRM_PLANE_TYPE_CURSOR); + + if (INTEL_INFO(dev)->gen >= 4) { + if (!dev->mode_config.rotation_property) + dev->mode_config.rotation_property = + drm_mode_create_rotation_property(dev, + BIT(DRM_ROTATE_0) | + BIT(DRM_ROTATE_180)); + if (dev->mode_config.rotation_property) + drm_object_attach_property(&cursor->base.base, + dev->mode_config.rotation_property, + cursor->rotation); + } + return &cursor->base; } @@ -12284,7 +12219,10 @@ static bool intel_crt_present(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_ULT(dev)) + if (INTEL_INFO(dev)->gen >= 9) + return false; + + if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) return false; if (IS_CHERRYVIEW(dev)) @@ -12636,8 +12574,12 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; dev_priv->display.off = ironlake_crtc_off; - dev_priv->display.update_primary_plane = - ironlake_update_primary_plane; + if (INTEL_INFO(dev)->gen >= 9) + dev_priv->display.update_primary_plane = + skylake_update_primary_plane; + else + dev_priv->display.update_primary_plane = + ironlake_update_primary_plane; } else if (HAS_PCH_SPLIT(dev)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; dev_priv->display.get_plane_config = ironlake_get_plane_config; @@ -12721,6 +12663,10 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.modeset_global_resources = valleyview_modeset_global_resources; dev_priv->display.write_eld = ironlake_write_eld; + } else if (INTEL_INFO(dev)->gen >= 9) { + dev_priv->display.write_eld = haswell_write_eld; + dev_priv->display.modeset_global_resources = + haswell_modeset_global_resources; } /* Default just returns -ENODEV to indicate unsupported */ @@ -12951,11 +12897,6 @@ void intel_modeset_init_hw(struct drm_device *dev) intel_enable_gt_powersave(dev); } -void intel_modeset_suspend_hw(struct drm_device *dev) -{ - intel_suspend_hw(dev); -} - void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -13291,7 +13232,7 @@ void i915_redisable_vga(struct drm_device *dev) * level, just check if the power well is enabled instead of trying to * follow the "don't touch the power well if we don't need it" policy * the rest of the driver uses. */ - if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA)) + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) return; i915_redisable_vga_power_on(dev); @@ -13512,9 +13453,7 @@ void intel_modeset_cleanup(struct drm_device *dev) * Too much stuff here (turning off rps, connectors, ...)
would * experience fancy races otherwise. */ - drm_irq_uninstall(dev); - intel_hpd_cancel_work(dev_priv); - dev_priv->pm._irqs_disabled = true; + intel_irq_uninstall(dev_priv); /* * Due to the hpd irq storm handling the hotplug work can re-arm the @@ -13669,8 +13608,8 @@ intel_display_capture_error_state(struct drm_device *dev) for_each_pipe(dev_priv, i) { error->pipe[i].power_domain_on = - intel_display_power_enabled_unlocked(dev_priv, - POWER_DOMAIN_PIPE(i)); + __intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(i)); if (!error->pipe[i].power_domain_on) continue; @@ -13705,7 +13644,7 @@ intel_display_capture_error_state(struct drm_device *dev) enum transcoder cpu_transcoder = transcoders[i]; error->transcoder[i].power_domain_on = - intel_display_power_enabled_unlocked(dev_priv, + __intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder)); if (!error->transcoder[i].power_domain_on) continue; @@ -13789,9 +13728,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file) for_each_intel_crtc(dev, crtc) { struct intel_unpin_work *work; - unsigned long irqflags; - spin_lock_irqsave(&dev->event_lock, irqflags); + spin_lock_irq(&dev->event_lock); work = crtc->unpin_work; @@ -13801,6 +13739,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file) work->event = NULL; } - spin_unlock_irqrestore(&dev->event_lock, irqflags); + spin_unlock_irq(&dev->event_lock); } } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ad45bfff3fe..dc4bc90a9df0 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -225,7 +225,7 @@ intel_dp_mode_valid(struct drm_connector *connector, } static uint32_t -pack_aux(uint8_t *src, int src_bytes) +pack_aux(const uint8_t *src, int src_bytes) { int i; uint32_t v = 0; @@ -661,6 +661,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return index ? 0 : 100; } +static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + /* + * SKL doesn't need us to program the AUX clock divider (Hardware will + * derive the clock from CDCLK automatically). We still implement the + * get_aux_clock_divider vfunc to plug-in into the existing code. + */ + return index ? 0 : 1; +} + static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, bool has_aux_irq, int send_bytes, @@ -691,9 +701,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); } +static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, + bool has_aux_irq, + int send_bytes, + uint32_t unused) +{ + return DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_DONE | + (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_TIME_OUT_1600us | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); +} + static int intel_dp_aux_ch(struct intel_dp *intel_dp, - uint8_t *send, int send_bytes, + const uint8_t *send, int send_bytes, uint8_t *recv, int recv_size) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -925,7 +950,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) BUG(); } - if (!HAS_DDI(dev)) + /* + * The AUX_CTL register is usually DP_CTL + 0x10. 
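+ *
+ * E.g. (illustrative register offsets only): for port B with output_reg
+ * at 0x64100 this puts aux_ch_ctl_reg at 0x64110.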
+ * + * On Haswell and Broadwell though: + * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU + * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU + * + * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU. + */ + if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; intel_dp->aux.name = name; @@ -1819,7 +1853,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, u32 tmp; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_enabled(dev_priv, power_domain)) + if (!intel_display_power_is_enabled(dev_priv, power_domain)) return false; tmp = I915_READ(intel_dp->output_reg); @@ -1995,10 +2029,8 @@ static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, POSTING_READ(ctl_reg); } -static void intel_edp_psr_setup(struct intel_dp *intel_dp) +static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; struct edp_vsc_psr psr_vsc; /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ @@ -2008,10 +2040,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) psr_vsc.sdp_header.HB2 = 0x2; psr_vsc.sdp_header.HB3 = 0x8; intel_edp_psr_write_vsc(intel_dp, &psr_vsc); - - /* Avoid continuous PSR exit by masking memup and hpd */ - I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); } static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) @@ -2021,8 +2049,17 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t aux_clock_divider; int precharge = 0x3; - int msg_size = 5; /* Header(4) + Message(1) */ bool only_standby = false; + static const uint8_t aux_msg[] = { + [0] = DP_AUX_NATIVE_WRITE << 4, + [1] = DP_SET_POWER >> 8, + [2] = DP_SET_POWER & 0xff, + [3] = 1 - 1, + [4] = DP_SET_POWER_D0, + }; + int i; + + BUILD_BUG_ON(sizeof(aux_msg) > 20); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); @@ -2038,11 +2075,13 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); /* Setup AUX registers */ - I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND); - I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION); + for (i = 0; i < sizeof(aux_msg); i += 4) + I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i, + pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); + I915_WRITE(EDP_PSR_AUX_CTL(dev), DP_AUX_CH_CTL_TIME_OUT_400us | - (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); } @@ -2131,10 +2170,7 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) WARN_ON(dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); - /* Enable PSR on the panel */ - intel_edp_psr_enable_sink(intel_dp); - - /* Enable PSR on the host */ + /* Enable/Re-enable PSR on the host */ intel_edp_psr_enable_source(intel_dp); dev_priv->psr.active = true; @@ -2158,17 +2194,25 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp) mutex_lock(&dev_priv->psr.lock); if (dev_priv->psr.enabled) { DRM_DEBUG_KMS("PSR already in use\n"); - mutex_unlock(&dev_priv->psr.lock); - return; + goto unlock; } + if (!intel_edp_psr_match_conditions(intel_dp)) + goto unlock; + dev_priv->psr.busy_frontbuffer_bits = 0; - /* Setup 
PSR once */ - intel_edp_psr_setup(intel_dp); + intel_edp_psr_setup_vsc(intel_dp); - if (intel_edp_psr_match_conditions(intel_dp)) - dev_priv->psr.enabled = intel_dp; + /* Avoid continuous PSR exit by masking memup and hpd */ + I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | + EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); + + /* Enable PSR on the panel */ + intel_edp_psr_enable_sink(intel_dp); + + dev_priv->psr.enabled = intel_dp; +unlock: mutex_unlock(&dev_priv->psr.lock); } @@ -2209,6 +2253,17 @@ static void intel_edp_psr_work(struct work_struct *work) container_of(work, typeof(*dev_priv), psr.work.work); struct intel_dp *intel_dp = dev_priv->psr.enabled; + /* We have to make sure PSR is ready for re-enable, + * otherwise it stays disabled until the next full enable/disable cycle. + * PSR might take some time to get fully disabled + * and be ready for re-enable. + */ + if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & + EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { + DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); + return; + } + mutex_lock(&dev_priv->psr.lock); intel_dp = dev_priv->psr.enabled; @@ -2680,6 +2735,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) mutex_lock(&dev_priv->dpio_lock); + /* allow hardware to manage TX FIFO reset source */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + /* Deassert soft data lane reset */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); val |= CHV_PCS_REQ_SOFTRESET_EN; @@ -2843,7 +2907,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_VALLEYVIEW(dev)) + if (INTEL_INFO(dev)->gen >= 9) + return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; + else if (IS_VALLEYVIEW(dev)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; else if (IS_GEN7(dev) && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; @@ -2859,7 +2925,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + if (INTEL_INFO(dev)->gen >= 9) { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + return DP_TRAIN_PRE_EMPH_LEVEL_3; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + return DP_TRAIN_PRE_EMPH_LEVEL_2; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + return DP_TRAIN_PRE_EMPH_LEVEL_1; + default: + return DP_TRAIN_PRE_EMPH_LEVEL_0; + } + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: return DP_TRAIN_PRE_EMPH_LEVEL_3; @@ -3095,12 +3172,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp) /* Clear calc init */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | 
DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + /* Program swing deemph */ for (i = 0; i < 4; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); @@ -3341,7 +3432,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) uint32_t signal_levels, mask; uint8_t train_set = intel_dp->train_set[0]; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) { signal_levels = intel_hsw_signal_levels(train_set); mask = DDI_BUF_EMP_MASK; } else if (IS_CHERRYVIEW(dev)) { @@ -3809,26 +3900,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) struct drm_device *dev = intel_dig_port->base.base.dev; struct intel_crtc *intel_crtc = to_intel_crtc(intel_dig_port->base.base.crtc); - u8 buf[1]; + u8 buf; + int test_crc_count; + int attempts = 6; - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0) + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) return -EIO; - if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) + if (!(buf & DP_TEST_CRC_SUPPORTED)) return -ENOTTY; + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) + return -EIO; + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, - DP_TEST_SINK_START) < 0) + buf | DP_TEST_SINK_START) < 0) + return -EIO; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) return -EIO; + test_crc_count = buf & DP_TEST_COUNT_MASK; - /* Wait 2 vblanks to be sure we will have the correct CRC value */ - intel_wait_for_vblank(dev, intel_crtc->pipe); - intel_wait_for_vblank(dev, intel_crtc->pipe); + do { + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_TEST_SINK_MISC, &buf) < 0) + return -EIO; + intel_wait_for_vblank(dev, intel_crtc->pipe); + } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count); + + if (attempts == 0) { + DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n"); + return -EIO; + } if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) return -EIO; - drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0); + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) + return -EIO; + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, + buf & ~DP_TEST_SINK_START) < 0) + return -EIO; + return 0; } @@ -5077,7 +5190,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp->pps_pipe = INVALID_PIPE; /* intel_dp vfuncs */ - if (IS_VALLEYVIEW(dev)) + if (INTEL_INFO(dev)->gen >= 9) + intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; + else if (IS_VALLEYVIEW(dev)) intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; @@ -5086,7 +5201,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, else intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; - intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; + if (INTEL_INFO(dev)->gen >= 9) + intel_dp->get_aux_send_ctl 
= skl_get_aux_send_ctl; + else + intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; /* Preserve the current hw state. */ intel_dp->DP = I915_READ(intel_dp->output_reg); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index d9a7a7865f66..b03fa9026a9c 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -278,7 +278,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) } static enum drm_connector_status -intel_mst_port_dp_detect(struct drm_connector *connector) +intel_dp_mst_detect(struct drm_connector *connector, bool force) { struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; @@ -286,14 +286,6 @@ intel_mst_port_dp_detect(struct drm_connector *connector) return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); } -static enum drm_connector_status -intel_dp_mst_detect(struct drm_connector *connector, bool force) -{ - enum drm_connector_status status; - status = intel_mst_port_dp_detect(connector); - return status; -} - static int intel_dp_mst_set_property(struct drm_connector *connector, struct drm_property *property, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ba715229a540..5ab813c6091e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -34,6 +34,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_dp_mst_helper.h> +#include <drm/drm_rect.h> #define DIV_ROUND_CLOSEST_ULL(ll, d) \ ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) @@ -240,6 +241,17 @@ typedef struct dpll { int p; } intel_clock_t; +struct intel_plane_state { + struct drm_crtc *crtc; + struct drm_framebuffer *fb; + struct drm_rect src; + struct drm_rect dst; + struct drm_rect clip; + struct drm_rect orig_src; + struct drm_rect orig_dst; + bool visible; +}; + struct intel_plane_config { bool tiled; int size; @@ -734,32 +746,46 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) return container_of(intel_hdmi, struct intel_digital_port, hdmi); } +/* + * Returns the number of planes for this pipe, i.e. the number of sprites + 1 + * (primary plane). The cursor plane is not included.
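+ *
+ * A typical use is iterating all non-cursor planes of a pipe (sketch,
+ * with a hypothetical helper):
+ *
+ *	for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ *		update_plane_watermarks(intel_crtc, i);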
+ */ +static inline unsigned int intel_num_planes(struct intel_crtc *crtc) +{ + return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1; +} -/* i915_irq.c */ -bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, +/* intel_fifo_underrun.c */ +bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable); -bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, +bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder, bool enable); +void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe); +void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, + enum transcoder pch_transcoder); +void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv); + +/* i915_irq.c */ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void intel_runtime_pm_disable_interrupts(struct drm_device *dev); -void intel_runtime_pm_restore_interrupts(struct drm_device *dev); +void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); +void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) { /* * We only use drm_irq_uninstall() at unload and VT switch, so * this is the only thing we need to check. 
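 *
 * Callers typically use this as a guard, e.g. (as in the gen8 ring
 * irq_get path later in this patch):
 *
 *	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 *		return false;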
*/ - return !dev_priv->pm._irqs_disabled; + return dev_priv->pm.irqs_enabled; } int intel_get_crtc_scanline(struct intel_crtc *crtc); -void i9xx_check_fifo_underruns(struct drm_device *dev); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); /* intel_crt.c */ @@ -792,11 +818,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); -/* intel_display.c */ -const char *intel_output_name(int output); -bool intel_has_pending_fb_unpin(struct drm_device *dev); -int intel_pch_rawclk(struct drm_device *dev); -void intel_mark_busy(struct drm_device *dev); +/* intel_frontbuffer.c */ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, struct intel_engine_cs *ring); void intel_frontbuffer_flip_prepare(struct drm_device *dev, @@ -806,7 +828,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev, void intel_frontbuffer_flush(struct drm_device *dev, unsigned frontbuffer_bits); /** - * intel_frontbuffer_flip - prepare frontbuffer flip + * intel_frontbuffer_flip - synchronous frontbuffer flip * @dev: DRM device * @frontbuffer_bits: frontbuffer plane tracking bits * @@ -824,6 +846,13 @@ void intel_frontbuffer_flip(struct drm_device *dev, } void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); + + +/* intel_display.c */ +const char *intel_output_name(int output); +bool intel_has_pending_fb_unpin(struct drm_device *dev); +int intel_pch_rawclk(struct drm_device *dev); +void intel_mark_busy(struct drm_device *dev); void intel_mark_idle(struct drm_device *dev); void intel_crtc_restore_mode(struct drm_crtc *crtc); void intel_crtc_control(struct drm_crtc *crtc, bool enable); @@ -844,7 +873,11 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe); -void intel_wait_for_vblank(struct drm_device *dev, int pipe); +static inline void +intel_wait_for_vblank(struct drm_device *dev, int pipe) +{ + drm_wait_one_vblank(dev, pipe); +} int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport); @@ -878,6 +911,8 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc); void intel_put_shared_dpll(struct intel_crtc *crtc); /* modesetting asserts */ +void assert_panel_unlocked(struct drm_i915_private *dev_priv, + enum pipe pipe); void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pll_enabled(d, p) assert_pll(d, p, true) @@ -908,7 +943,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, bool intel_crtc_active(struct drm_crtc *crtc); void hsw_enable_ips(struct intel_crtc *crtc); void hsw_disable_ips(struct intel_crtc *crtc); -void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder); void intel_mode_from_pipe_config(struct drm_display_mode *mode, @@ -1055,6 +1089,28 @@ extern struct drm_display_mode *intel_find_panel_downclock( struct drm_display_mode *fixed_mode, struct drm_connector *connector); +/* intel_runtime_pm.c */ +int intel_power_domains_init(struct drm_i915_private *); +void intel_power_domains_fini(struct drm_i915_private *); +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); 
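+
+/*
+ * The references below are strictly get/put paired; a sketch of the
+ * usual calling pattern (illustrative only):
+ *
+ *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
+ *	...touch the hardware behind that power well...
+ *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+ */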
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); + +bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +void intel_display_power_get(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +void intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); +void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); +void intel_runtime_pm_get(struct drm_i915_private *dev_priv); +void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); +void intel_runtime_pm_put(struct drm_i915_private *dev_priv); + +void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); + /* intel_pm.c */ void intel_init_clock_gating(struct drm_device *dev); void intel_suspend_hw(struct drm_device *dev); @@ -1072,17 +1128,6 @@ bool intel_fbc_enabled(struct drm_device *dev); void intel_update_fbc(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); -int intel_power_domains_init(struct drm_i915_private *); -void intel_power_domains_remove(struct drm_i915_private *); -bool intel_display_power_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -void intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); void intel_init_gt_powersave(struct drm_device *dev); void intel_cleanup_gt_powersave(struct drm_device *dev); void intel_enable_gt_powersave(struct drm_device *dev); @@ -1093,13 +1138,6 @@ void ironlake_teardown_rc6(struct drm_device *dev); void gen6_update_ring_freq(struct drm_device *dev); void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_boost(struct drm_i915_private *dev_priv); -void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); -void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); -void intel_runtime_pm_get(struct drm_i915_private *dev_priv); -void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); -void intel_runtime_pm_put(struct drm_i915_private *dev_priv); -void intel_init_runtime_pm(struct drm_i915_private *dev_priv); -void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 5bd9e09ad3c5..0b184079de14 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, DRM_DEBUG_KMS("\n"); power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_enabled(dev_priv, power_domain)) + if (!intel_display_power_is_enabled(dev_priv, power_domain)) return false; /* XXX: this only works for one DSI output */ diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c new file mode 100644 index 000000000000..77af512d2d35 --- /dev/null +++ 
b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -0,0 +1,381 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + */ + +#include "i915_drv.h" +#include "intel_drv.h" + +/** + * DOC: fifo underrun handling + * + * The i915 driver checks for display fifo underruns using the interrupt signals + * provided by the hardware. This is enabled by default and fairly useful to + * debug display issues, especially watermark settings. + * + * If an underrun is detected this is logged into dmesg. To avoid flooding logs + * and occupying the cpu, underrun interrupts are disabled after the first + * occurrence until the next modeset on a given pipe. + * + * Note that underrun detection on gmch platforms is a bit uglier since there + * is no interrupt (even though the signalling bit is in the PIPESTAT pipe + * interrupt register). Also on some other platforms underrun interrupts are + * shared, which means that if we detect an underrun we need to disable underrun + * reporting on all pipes. + * + * The code also supports underrun detection on the PCH transcoder. + */ + +static bool ivb_can_enable_err_int(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + enum pipe pipe; + + assert_spin_locked(&dev_priv->irq_lock); + + for_each_pipe(dev_priv, pipe) { + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + + if (crtc->cpu_fifo_underrun_disabled) + return false; + } + + return true; +} + +static bool cpt_can_enable_serr_int(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + struct intel_crtc *crtc; + + assert_spin_locked(&dev_priv->irq_lock); + + for_each_pipe(dev_priv, pipe) { + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + + if (crtc->pch_fifo_underrun_disabled) + return false; + } + + return true; +} + +/** + * i9xx_check_fifo_underruns - check for fifo underruns + * @dev_priv: i915 device instance + * + * This function checks for fifo underruns on GMCH platforms. This needs to be + * done manually on modeset to make sure that we catch all underruns since they + * do not generate an interrupt by themselves on these platforms.
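+ *
+ * The per-pipe check below essentially boils down to (simplified):
+ *
+ *	pipestat = I915_READ(PIPESTAT(crtc->pipe)) & 0xffff0000;
+ *	if (pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ *		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));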
+ */ +void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + spin_lock_irq(&dev_priv->irq_lock); + + for_each_intel_crtc(dev_priv->dev, crtc) { + u32 reg = PIPESTAT(crtc->pipe); + u32 pipestat; + + if (crtc->cpu_fifo_underrun_disabled) + continue; + + pipestat = I915_READ(reg) & 0xffff0000; + if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) + continue; + + I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); + POSTING_READ(reg); + + DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); + } + + spin_unlock_irq(&dev_priv->irq_lock); +} + +static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, + bool enable, bool old) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg = PIPESTAT(pipe); + u32 pipestat = I915_READ(reg) & 0xffff0000; + + assert_spin_locked(&dev_priv->irq_lock); + + if (enable) { + I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); + POSTING_READ(reg); + } else { + if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + } +} + +static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : + DE_PIPEB_FIFO_UNDERRUN; + + if (enable) + ironlake_enable_display_irq(dev_priv, bit); + else + ironlake_disable_display_irq(dev_priv, bit); +} + +static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, + bool enable, bool old) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (enable) { + I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); + + if (!ivb_can_enable_err_int(dev)) + return; + + ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); + } else { + ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + + if (old && + I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { + DRM_ERROR("uncleared fifo underrun on pipe %c\n", + pipe_name(pipe)); + } + } +} + +static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + assert_spin_locked(&dev_priv->irq_lock); + + if (enable) + dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; + else + dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); +} + +static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, + enum transcoder pch_transcoder, + bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; + + if (enable) + ibx_enable_display_interrupt(dev_priv, bit); + else + ibx_disable_display_interrupt(dev_priv, bit); +} + +static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, + enum transcoder pch_transcoder, + bool enable, bool old) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (enable) { + I915_WRITE(SERR_INT, + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); + + if (!cpt_can_enable_serr_int(dev)) + return; + + ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); + } else { + ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); + + if (old && I915_READ(SERR_INT) & + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { + DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", + transcoder_name(pch_transcoder)); + } + } +} + +static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + bool old; + + assert_spin_locked(&dev_priv->irq_lock); + + old = !intel_crtc->cpu_fifo_underrun_disabled; + intel_crtc->cpu_fifo_underrun_disabled = !enable; + + if (HAS_GMCH_DISPLAY(dev)) + i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); + else if (IS_GEN5(dev) || IS_GEN6(dev)) + ironlake_set_fifo_underrun_reporting(dev, pipe, enable); + else if (IS_GEN7(dev)) + ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); + else if (IS_GEN8(dev) || IS_GEN9(dev)) + broadwell_set_fifo_underrun_reporting(dev, pipe, enable); + + return old; +} + +/** + * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state + * @dev_priv: i915 device instance + * @pipe: (CPU) pipe to set state for + * @enable: whether underruns should be reported or not + * + * This function sets the fifo underrun state for @pipe. It is used in the + * modeset code to avoid false positives since on many platforms underruns are + * expected when disabling or enabling the pipe. + * + * Notice that on some platforms disabling underrun reports for one pipe + * disables for all due to shared interrupts. Actual reporting is still per-pipe + * though. + * + * Returns the previous state of underrun reporting. + */ +bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, + enum pipe pipe, bool enable) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, + enable); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return ret; +} + +static bool +__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + return !intel_crtc->cpu_fifo_underrun_disabled; +} + +/** + * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state + * @dev_priv: i915 device instance + * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) + * @enable: whether underruns should be reported or not + * + * This function makes us disable or enable PCH fifo underruns for a specific + * PCH transcoder. Notice that on some PCHs (e.g. 
CPT/PPT), disabling FIFO + * underrun reporting for one transcoder may also disable all the other PCH + * error interrupts for the other transcoders, due to the fact that there's just + * one interrupt mask/enable bit for all the transcoders. + * + * Returns the previous state of underrun reporting. + */ +bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, + enum transcoder pch_transcoder, + bool enable) +{ + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long flags; + bool old; + + /* + * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT + * has only one pch transcoder A that all pipes can use. To avoid racy + * pch transcoder -> pipe lookups from interrupt code simply store the + * underrun statistics in crtc A. Since we never expose this anywhere + * nor use it outside of the fifo underrun code here using the "wrong" + * crtc on LPT won't cause issues. + */ + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + + old = !intel_crtc->pch_fifo_underrun_disabled; + intel_crtc->pch_fifo_underrun_disabled = !enable; + + if (HAS_PCH_IBX(dev_priv->dev)) + ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, + enable); + else + cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, + enable, old); + + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + return old; +} + +/** + * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt + * @dev_priv: i915 device instance + * @pipe: (CPU) pipe to handle the underrun for + * + * This handles a CPU fifo underrun interrupt, generating an underrun warning + * into dmesg if underrun reporting is enabled and then disables the underrun + * interrupt to avoid an irq storm. + */ +void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + /* GMCH can't disable fifo underruns, filter them. */ + if (HAS_GMCH_DISPLAY(dev_priv->dev) && + !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) + return; + + if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) + DRM_ERROR("CPU pipe %c FIFO underrun\n", + pipe_name(pipe)); +} + +/** + * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt + * @dev_priv: i915 device instance + * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) + * + * This handles a PCH fifo underrun interrupt, generating an underrun warning + * into dmesg if underrun reporting is enabled and then disables the underrun + * interrupt to avoid an irq storm.
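+ *
+ * A south error interrupt handler would invoke it roughly like this
+ * (hypothetical caller sketch):
+ *
+ *	if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(TRANSCODER_A))
+ *		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);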
+ */ +void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, + enum transcoder pch_transcoder) +{ + if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, + false)) + DRM_ERROR("PCH transcoder %c FIFO underrun\n", + transcoder_name(pch_transcoder)); +} diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c new file mode 100644 index 000000000000..58cf2e6b78f4 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -0,0 +1,279 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +/** + * DOC: frontbuffer tracking + * + * Many features require us to track changes to the currently active + * frontbuffer, especially rendering targeted at the frontbuffer. + * + * To be able to do so GEM tracks frontbuffers using a bitmask for all possible + * frontbuffer slots through i915_gem_track_fb(). The functions in this file are + * then called when the contents of the frontbuffer are invalidated, when + * frontbuffer rendering has stopped again to flush out all the changes and when + * the frontbuffer is exchanged with a flip. Subsystems interested in + * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks + * into the relevant places and filter for the frontbuffer slots that they are + * interested in. + * + * On a high level there are two types of powersaving features. The first type + * works like a special cache (FBC and PSR) and is interested in when caching + * should stop and when it can be restarted. This is done by placing callbacks + * into the invalidate and the flush functions: At invalidate the caching must + * be stopped and at flush time it can be restarted. Such features may also need + * to know when the frontbuffer changes (e.g. when the hw doesn't initiate an + * invalidate and flush on its own), which can be achieved by placing callbacks + * into the flip functions. + * + * The other type of display power saving feature only cares about busyness + * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate + * busyness. There is no direct way to detect idleness. Instead an idle-timer + * delayed work item should be started from the flush and flip functions and + * cancelled as soon as busyness is detected.
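+ *
+ * A DRRS-style consumer could therefore be sketched as (hypothetical
+ * helpers and timeout):
+ *
+ *	flush/flip callback:  mod_delayed_work(system_wq, &idle_work, timeout);
+ *	invalidate callback:  cancel_delayed_work(&idle_work);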
+ * + * Note that there's also an older frontbuffer activity tracking scheme which + * just tracks general activity. This is done by the various mark_busy and + * mark_idle functions. For display power management features using these + * functions is deprecated and should be avoided. + */ + +#include <drm/drmP.h> + +#include "intel_drv.h" +#include "i915_drv.h" + +static void intel_increase_pllclock(struct drm_device *dev, + enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int dpll_reg = DPLL(pipe); + int dpll; + + if (!HAS_GMCH_DISPLAY(dev)) + return; + + if (!dev_priv->lvds_downclock_avail) + return; + + dpll = I915_READ(dpll_reg); + if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { + DRM_DEBUG_DRIVER("upclocking LVDS\n"); + + assert_panel_unlocked(dev_priv, pipe); + + dpll &= ~DISPLAY_RATE_SELECT_FPA1; + I915_WRITE(dpll_reg, dpll); + intel_wait_for_vblank(dev, pipe); + + dpll = I915_READ(dpll_reg); + if (dpll & DISPLAY_RATE_SELECT_FPA1) + DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); + } +} + +/** + * intel_mark_fb_busy - mark given planes as busy + * @dev: DRM device + * @frontbuffer_bits: bits for the affected planes + * @ring: optional ring for asynchronous commands + * + * This function gets called every time the screen contents change. It can be + * used to keep e.g. the update rate at the nominal refresh rate with DRRS. + */ +static void intel_mark_fb_busy(struct drm_device *dev, + unsigned frontbuffer_bits, + struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + + if (!i915.powersave) + return; + + for_each_pipe(dev_priv, pipe) { + if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) + continue; + + intel_increase_pllclock(dev, pipe); + if (ring && intel_fbc_enabled(dev)) + ring->fbc_dirty = true; + } +} + +/** + * intel_fb_obj_invalidate - invalidate frontbuffer object + * @obj: GEM object to invalidate + * @ring: set for asynchronous rendering + * + * This function gets called every time rendering on the given object starts and + * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must + * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed + * until the rendering completes or a flip on this frontbuffer plane is + * scheduled. + */ +void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (!obj->frontbuffer_bits) + return; + + if (ring) { + mutex_lock(&dev_priv->fb_tracking.lock); + dev_priv->fb_tracking.busy_bits + |= obj->frontbuffer_bits; + dev_priv->fb_tracking.flip_bits + &= ~obj->frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + } + + intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); + + intel_edp_psr_invalidate(dev, obj->frontbuffer_bits); +} + +/** + * intel_frontbuffer_flush - flush frontbuffer + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes has + * completed and frontbuffer caching can be started again. Flushes will get + * delayed if they're blocked by some outstanding asynchronous rendering. + * + * Can be called without any locks held. 
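+ *
+ * Note how the flush below is simply masked by whatever bits are still
+ * marked busy:
+ *
+ *	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;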
+ */ +void intel_frontbuffer_flush(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Delay flushing when rings are still busy. */ + mutex_lock(&dev_priv->fb_tracking.lock); + frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + + intel_mark_fb_busy(dev, frontbuffer_bits, NULL); + + intel_edp_psr_flush(dev, frontbuffer_bits); + + /* + * FIXME: Unconditional fbc flushing here is a rather gross hack and + * needs to be reworked into a proper frontbuffer tracking scheme like + * psr employs. + */ + if (dev_priv->fbc.need_sw_cache_clean) { + dev_priv->fbc.need_sw_cache_clean = false; + bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN); + } +} + +/** + * intel_fb_obj_flush - flush frontbuffer object + * @obj: GEM object to flush + * @retire: set when retiring asynchronous rendering + * + * This function gets called every time rendering on the given object has + * completed and frontbuffer caching can be started again. If @retire is true + * then any delayed flushes will be unblocked. + */ +void intel_fb_obj_flush(struct drm_i915_gem_object *obj, + bool retire) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned frontbuffer_bits; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (!obj->frontbuffer_bits) + return; + + frontbuffer_bits = obj->frontbuffer_bits; + + if (retire) { + mutex_lock(&dev_priv->fb_tracking.lock); + /* Filter out new bits since rendering started. */ + frontbuffer_bits &= dev_priv->fb_tracking.busy_bits; + + dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + } + + intel_frontbuffer_flush(dev, frontbuffer_bits); +} + +/** + * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after scheduling a flip on the given planes. The + * actual frontbuffer flushing will be delayed until completion is signalled + * with intel_frontbuffer_flip_complete. If an invalidate happens in between, + * this flush will be cancelled. + * + * Can be called without any locks held. + */ +void intel_frontbuffer_flip_prepare(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev_priv->fb_tracking.lock); + dev_priv->fb_tracking.flip_bits |= frontbuffer_bits; + /* Remove stale busy bits due to the old buffer. */ + dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); +} + +/** + * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after the flip has been latched and will complete + * on the next vblank. It will execute the flush if it hasn't been cancelled yet. + * + * Can be called without any locks held. + */ +void intel_frontbuffer_flip_complete(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev_priv->fb_tracking.lock); + /* Mask any cancelled flips. 
*/ + frontbuffer_bits &= dev_priv->fb_tracking.flip_bits; + dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + + intel_frontbuffer_flush(dev, frontbuffer_bits); +} diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 29ec1535992d..8b5f3aa027f3 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -690,7 +690,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, u32 tmp; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_enabled(dev_priv, power_domain)) + if (!intel_display_power_is_enabled(dev_priv, power_domain)) return false; tmp = I915_READ(intel_hdmi->hdmi_reg); @@ -1405,6 +1405,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) mutex_lock(&dev_priv->dpio_lock); + /* allow hardware to manage TX FIFO reset source */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + /* Deassert soft data lane reset*/ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); val |= CHV_PCS_REQ_SOFTRESET_EN; @@ -1441,12 +1450,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) /* Clear calc init */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + /* FIXME: Program the support xxx V-dB */ /* Use 800mV-0dB */ for (i = 0; i < 4; i++) { diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index bafd38b5703e..cd74e5ce4253 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1063,7 +1063,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1660,7 +1660,7 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring) * the creation is a deferred call: it's better to make sure first that we need to use * a given ring with the context. * - * Return: non-zero on eror. + * Return: non-zero on error. 
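 *
 * A caller would typically do (illustrative sketch):
 *
 *	ret = intel_lr_context_deferred_create(ctx, ring);
 *	if (ret)
 *		return ret;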
*/ int intel_lr_context_deferred_create(struct intel_context *ctx, struct intel_engine_cs *ring) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index a6bd1422e38f..2b50c98dd6b0 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, u32 tmp; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_enabled(dev_priv, power_domain)) + if (!intel_display_power_is_enabled(dev_priv, power_domain)) return false; tmp = I915_READ(lvds_encoder->reg); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 0e018cb49147..e18b3f49074c 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -537,14 +537,13 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - unsigned long flags; - spin_lock_irqsave(&dev_priv->backlight_lock, flags); + mutex_lock(&dev_priv->backlight_lock); val = dev_priv->display.get_backlight(connector); val = intel_panel_compute_brightness(connector, val); - spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); + mutex_unlock(&dev_priv->backlight_lock); DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); return val; @@ -628,12 +627,11 @@ static void intel_panel_set_backlight(struct intel_connector *connector, struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); u32 hw_level; - unsigned long flags; if (!panel->backlight.present || pipe == INVALID_PIPE) return; - spin_lock_irqsave(&dev_priv->backlight_lock, flags); + mutex_lock(&dev_priv->backlight_lock); WARN_ON(panel->backlight.max == 0); @@ -643,7 +641,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector, if (panel->backlight.enabled) intel_panel_actually_set_backlight(connector, hw_level); - spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); + mutex_unlock(&dev_priv->backlight_lock); } /* set backlight brightness to level in range [0..max], assuming hw min is @@ -657,12 +655,11 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector, struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); u32 hw_level; - unsigned long flags; if (!panel->backlight.present || pipe == INVALID_PIPE) return; - spin_lock_irqsave(&dev_priv->backlight_lock, flags); + mutex_lock(&dev_priv->backlight_lock); WARN_ON(panel->backlight.max == 0); @@ -678,7 +675,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector, if (panel->backlight.enabled) intel_panel_actually_set_backlight(connector, hw_level); - spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); + mutex_unlock(&dev_priv->backlight_lock); } static void pch_disable_backlight(struct intel_connector *connector) @@ -732,7 +729,6 @@ void intel_panel_disable_backlight(struct intel_connector *connector) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); - unsigned long flags; if (!panel->backlight.present || pipe == INVALID_PIPE) return; @@ -748,14 +744,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector) return; } - spin_lock_irqsave(&dev_priv->backlight_lock, flags); + mutex_lock(&dev_priv->backlight_lock); if 
(panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_POWERDOWN; panel->backlight.enabled = false; dev_priv->display.disable_backlight(connector); - spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); + mutex_unlock(&dev_priv->backlight_lock); } static void bdw_enable_backlight(struct intel_connector *connector) @@ -779,8 +775,9 @@ static void bdw_enable_backlight(struct intel_connector *connector) if (panel->backlight.active_low_pwm) pch_ctl1 |= BLM_PCH_POLARITY; - /* BDW always uses the pch pwm controls. */ - pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; + /* After LPT, override is the default. */ + if (HAS_PCH_LPT(dev_priv)) + pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); POSTING_READ(BLC_PWM_PCH_CTL1); @@ -936,14 +933,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); - unsigned long flags; if (!panel->backlight.present || pipe == INVALID_PIPE) return; DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); - spin_lock_irqsave(&dev_priv->backlight_lock, flags); + mutex_lock(&dev_priv->backlight_lock); WARN_ON(panel->backlight.max == 0); @@ -961,7 +957,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_UNBLANK; - spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); + mutex_unlock(&dev_priv->backlight_lock); } #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) @@ -1266,7 +1262,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_panel *panel = &intel_connector->panel; - unsigned long flags; int ret; if (!dev_priv->vbt.backlight.present) { @@ -1279,9 +1274,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector) } /* set level and max in panel struct */ - spin_lock_irqsave(&dev_priv->backlight_lock, flags); + mutex_lock(&dev_priv->backlight_lock); ret = dev_priv->display.setup_backlight(intel_connector); - spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); + mutex_unlock(&dev_priv->backlight_lock); if (ret) { DRM_DEBUG_KMS("failed to setup backlight for connector %s\n", @@ -1316,7 +1311,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) { dev_priv->display.setup_backlight = bdw_setup_backlight; dev_priv->display.enable_backlight = bdw_enable_backlight; dev_priv->display.disable_backlight = pch_disable_backlight; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c27b6140bfd1..7a69eba533c7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -30,9 +30,6 @@ #include "intel_drv.h" #include "../../../platform/x86/intel_ips.h" #include <linux/module.h> -#include <linux/vgaarb.h> -#include <drm/i915_powerwell.h> -#include <linux/pm_runtime.h> /** * RC6 is a special power stage which allows the GPU to enter an very @@ -66,11 +63,37 @@ * i915.i915_enable_fbc parameter */ +static void gen9_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * WaDisableSDEUnitClockGating:skl + * This seems to be a pre-production w/a. 
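The CACHE_MODE_1 write just below relies on the driver's masked-bit convention: such registers carry a write-enable mask in their upper 16 bits, so a single bit can be changed without a read-modify-write cycle. A rough sketch of what the existing helper expands to (the EXAMPLE_ name is illustrative, not part of this patch):

	/* roughly equivalent to i915's _MASKED_BIT_ENABLE(bit) */
	#define EXAMPLE_MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))
	/* upper half selects which bits to update, lower half gives their values */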
+ */ + I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + GEN8_SDEUNIT_CLOCK_GATE_DISABLE); + + /* + * WaDisableDgMirrorFixInHalfSliceChicken5:skl + * This is a pre-production w/a. + */ + I915_WRITE(GEN9_HALF_SLICE_CHICKEN5, + I915_READ(GEN9_HALF_SLICE_CHICKEN5) & + ~GEN9_DG_MIRROR_FIX_ENABLE); + + /* Wa4x4STCOptimizationDisable:skl */ + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); +} + static void i8xx_disable_fbc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 fbc_ctl; + dev_priv->fbc.enabled = false; + /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) @@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc) int i; u32 fbc_ctl; + dev_priv->fbc.enabled = true; + cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; if (fb->pitches[0] < cfb_pitch) cfb_pitch = fb->pitches[0]; @@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; + dev_priv->fbc.enabled = true; + dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) dpfc_ctl |= DPFC_CTL_LIMIT_2X; @@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 dpfc_ctl; + dev_priv->fbc.enabled = false; + /* Disable compression */ dpfc_ctl = I915_READ(DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { @@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; + dev_priv->fbc.enabled = true; + dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) dev_priv->fbc.threshold++; @@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 dpfc_ctl; + dev_priv->fbc.enabled = false; + /* Disable compression */ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { @@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; + dev_priv->fbc.enabled = true; + dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) dev_priv->fbc.threshold++; @@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->display.fbc_enabled) - return false; - - return dev_priv->display.fbc_enabled(dev); + return dev_priv->fbc.enabled; } -void gen8_fbc_sw_flush(struct drm_device *dev, u32 value) +void bdw_fbc_sw_flush(struct drm_device *dev, u32 value) { struct drm_i915_private *dev_priv = dev->dev_private; if (!IS_GEN8(dev)) return; + if (!intel_fbc_enabled(dev)) + return; + I915_WRITE(MSG_FBC_REND_STATE, value); } @@ -1310,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc, int *prec_mult, int *drain_latency) { + struct drm_device *dev = crtc->dev; int entries; int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; @@ -1320,8 +1356,12 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc, return false; entries = DIV_ROUND_UP(clock, 1000) * pixel_size; - *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 : - DRAIN_LATENCY_PRECISION_32; + if (IS_CHERRYVIEW(dev)) + *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 : + DRAIN_LATENCY_PRECISION_16; + else + *prec_mult = (entries > 128) ? 
DRAIN_LATENCY_PRECISION_64 : + DRAIN_LATENCY_PRECISION_32; *drain_latency = (64 * (*prec_mult) * 4) / entries; if (*drain_latency > DRAIN_LATENCY_MASK) @@ -1340,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc, static void vlv_update_drain_latency(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pixel_size; int drain_latency; enum pipe pipe = intel_crtc->pipe; int plane_prec, prec_mult, plane_dl; + const int high_precision = IS_CHERRYVIEW(dev) ? + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; - plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 | - DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 | + plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH | + DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH | (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT)); if (!intel_crtc_active(crtc)) { @@ -1359,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc) /* Primary plane Drain Latency */ pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { - plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? - DDL_PLANE_PRECISION_64 : - DDL_PLANE_PRECISION_32; + plane_prec = (prec_mult == high_precision) ? + DDL_PLANE_PRECISION_HIGH : + DDL_PLANE_PRECISION_LOW; plane_dl |= plane_prec | drain_latency; } @@ -1373,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc) /* Program cursor DL only if it is enabled */ if (intel_crtc->cursor_base && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { - plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? - DDL_CURSOR_PRECISION_64 : - DDL_CURSOR_PRECISION_32; + plane_prec = (prec_mult == high_precision) ? + DDL_CURSOR_PRECISION_HIGH : + DDL_CURSOR_PRECISION_LOW; plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT); } @@ -1543,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane, int plane_prec; int sprite_dl; int prec_mult; + const int high_precision = IS_CHERRYVIEW(dev) ? + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; - sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) | + sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) | (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite))); if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { - plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? - DDL_SPRITE_PRECISION_64(sprite) : - DDL_SPRITE_PRECISION_32(sprite); + plane_prec = (prec_mult == high_precision) ? + DDL_SPRITE_PRECISION_HIGH(sprite) : + DDL_SPRITE_PRECISION_LOW(sprite); sprite_dl |= plane_prec | (drain_latency << DDL_SPRITE_SHIFT(sprite)); } @@ -3594,10 +3639,15 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) else mode = 0; } - DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + if (HAS_RC6p(dev)) + DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? 
"on" : "off"); + + else + DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); } static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) @@ -3614,7 +3664,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) if (enable_rc6 >= 0) { int mask; - if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) + if (HAS_RC6p(dev)) mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE; else @@ -5614,16 +5664,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - /* FIXME(BDW): Check all the w/a, some might only apply to - * pre-production hw. */ - - - I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); - - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2))); - - /* WaSwitchSolVfFArbitrationPriority:bdw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -6041,1161 +6081,35 @@ void intel_suspend_hw(struct drm_device *dev) lpt_suspend_hw(dev); } -#define for_each_power_well(i, power_well, domain_mask, power_domains) \ - for (i = 0; \ - i < (power_domains)->power_well_count && \ - ((power_well) = &(power_domains)->power_wells[i]); \ - i++) \ - if ((power_well)->domains & (domain_mask)) - -#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \ - for (i = (power_domains)->power_well_count - 1; \ - i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\ - i--) \ - if ((power_well)->domains & (domain_mask)) - -/** - * We should only use the power well if we explicitly asked the hardware to - * enable it, so check if it's enabled and also check if we've requested it to - * be enabled. - */ -static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return I915_READ(HSW_PWR_WELL_DRIVER) == - (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); -} - -bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains; - struct i915_power_well *power_well; - bool is_enabled; - int i; - - if (dev_priv->pm.suspended) - return false; - - power_domains = &dev_priv->power_domains; - - is_enabled = true; - - for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { - if (power_well->always_on) - continue; - - if (!power_well->hw_enabled) { - is_enabled = false; - break; - } - } - - return is_enabled; -} - -bool intel_display_power_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains; - bool ret; - - power_domains = &dev_priv->power_domains; - - mutex_lock(&power_domains->lock); - ret = intel_display_power_enabled_unlocked(dev_priv, domain); - mutex_unlock(&power_domains->lock); - - return ret; -} - -/* - * Starting with Haswell, we have a "Power Down Well" that can be turned off - * when not needed anymore. We have 4 registers that can request the power well - * to be enabled, and it will only be disabled if none of the registers is - * requesting it to be enabled. - */ -static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - /* - * After we re-enable the power well, if we touch VGA register 0x3d5 - * we'll get unclaimed register interrupts. This stops after we write - * anything to the VGA MSR register. 
The vgacon module uses this - * register all the time, so if we unbind our driver and, as a - * consequence, bind vgacon, we'll get stuck in an infinite loop at - * console_unlock(). So make here we touch the VGA MSR register, making - * sure vgacon can keep working normally without triggering interrupts - * and error messages. - */ - vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); - outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); - vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); - - if (IS_BROADWELL(dev)) - gen8_irq_power_well_post_enable(dev_priv); -} - -static void hsw_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) -{ - bool is_enabled, enable_requested; - uint32_t tmp; - - tmp = I915_READ(HSW_PWR_WELL_DRIVER); - is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; - enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; - - if (enable) { - if (!enable_requested) - I915_WRITE(HSW_PWR_WELL_DRIVER, - HSW_PWR_WELL_ENABLE_REQUEST); - - if (!is_enabled) { - DRM_DEBUG_KMS("Enabling power well\n"); - if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & - HSW_PWR_WELL_STATE_ENABLED), 20)) - DRM_ERROR("Timeout enabling power well\n"); - } - - hsw_power_well_post_enable(dev_priv); - } else { - if (enable_requested) { - I915_WRITE(HSW_PWR_WELL_DRIVER, 0); - POSTING_READ(HSW_PWR_WELL_DRIVER); - DRM_DEBUG_KMS("Requesting to disable the power well\n"); - } - } -} - -static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - hsw_set_power_well(dev_priv, power_well, power_well->count > 0); - - /* - * We're taking over the BIOS, so clear any requests made by it since - * the driver is in charge now. - */ - if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) - I915_WRITE(HSW_PWR_WELL_BIOS, 0); -} - -static void hsw_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - hsw_set_power_well(dev_priv, power_well, true); -} - -static void hsw_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - hsw_set_power_well(dev_priv, power_well, false); -} - -static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ -} - -static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return true; -} - -static void vlv_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) -{ - enum punit_power_well power_well_id = power_well->data; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(power_well_id); - state = enable ? 
PUNIT_PWRGT_PWR_ON(power_well_id) : - PUNIT_PWRGT_PWR_GATE(power_well_id); - - mutex_lock(&dev_priv->rps.hw_lock); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); - ctrl &= ~mask; - ctrl |= state; - vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); - - if (wait_for(COND, 100)) - DRM_ERROR("timout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); - -#undef COND - -out: - mutex_unlock(&dev_priv->rps.hw_lock); -} - -static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, power_well->count > 0); -} - -static void vlv_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, true); -} - -static void vlv_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) +static void intel_init_fbc(struct drm_i915_private *dev_priv) { - vlv_set_power_well(dev_priv, power_well, false); -} - -static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - int power_well_id = power_well->data; - bool enabled = false; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(power_well_id); - ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); - - mutex_lock(&dev_priv->rps.hw_lock); - - state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. - */ - WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) && - state != PUNIT_PWRGT_PWR_GATE(power_well_id)); - if (state == ctrl) - enabled = true; - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; - WARN_ON(ctrl != state); - - mutex_unlock(&dev_priv->rps.hw_lock); - - return enabled; -} - -static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); - - vlv_set_power_well(dev_priv, power_well, true); - - spin_lock_irq(&dev_priv->irq_lock); - valleyview_enable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - /* - * During driver initialization/resume we can avoid restoring the - * part of the HW/SW state that will be inited anyway explicitly. - */ - if (dev_priv->power_domains.initializing) + if (!HAS_FBC(dev_priv)) { + dev_priv->fbc.enabled = false; return; - - intel_hpd_init(dev_priv->dev); - - i915_redisable_vga_power_on(dev_priv->dev); -} - -static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); - - spin_lock_irq(&dev_priv->irq_lock); - valleyview_disable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - vlv_set_power_well(dev_priv, power_well, false); - - vlv_power_sequencer_reset(dev_priv); -} - -static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); - - /* - * Enable the CRI clock source so we can get at the - * display and the reference clock for VGA - * hotplug / manual detection. 
- */ - I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | - DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - - vlv_set_power_well(dev_priv, power_well, true); - - /* - * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - - * 6. De-assert cmn_reset/side_reset. Same as VLV X0. - * a. GUnit 0x2110 bit[0] set to 1 (def 0) - * b. The other bits such as sfr settings / modesel may all - * be set to 0. - * - * This should only be done on init and resume from S3 with - * both PLLs disabled, or we risk losing DPIO and PLL - * synchronization. - */ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); -} - -static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum pipe pipe; - - WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); - - for_each_pipe(dev_priv, pipe) - assert_pll_disabled(dev_priv, pipe); - - /* Assert common reset */ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); - - vlv_set_power_well(dev_priv, power_well, false); -} - -static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum dpio_phy phy; - - WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); - - /* - * Enable the CRI clock source so we can get at the - * display and the reference clock for VGA - * hotplug / manual detection. - */ - if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { - phy = DPIO_PHY0; - I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | - DPLL_REFA_CLK_ENABLE_VLV); - I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | - DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); - } else { - phy = DPIO_PHY1; - I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | - DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); } - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - vlv_set_power_well(dev_priv, power_well, true); - - /* Poll for phypwrgood signal */ - if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) - DRM_ERROR("Display PHY %d is not power up\n", phy); - - I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) | - PHY_COM_LANE_RESET_DEASSERT(phy)); -} -static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum dpio_phy phy; - - WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); - - if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { - phy = DPIO_PHY0; - assert_pll_disabled(dev_priv, PIPE_A); - assert_pll_disabled(dev_priv, PIPE_B); + if (INTEL_INFO(dev_priv)->gen >= 7) { + dev_priv->display.fbc_enabled = ironlake_fbc_enabled; + dev_priv->display.enable_fbc = gen7_enable_fbc; + dev_priv->display.disable_fbc = ironlake_disable_fbc; + } else if (INTEL_INFO(dev_priv)->gen >= 5) { + dev_priv->display.fbc_enabled = ironlake_fbc_enabled; + dev_priv->display.enable_fbc = ironlake_enable_fbc; + dev_priv->display.disable_fbc = ironlake_disable_fbc; + } else if (IS_GM45(dev_priv)) { + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; + dev_priv->display.disable_fbc = g4x_disable_fbc; } else { - phy = DPIO_PHY1; - assert_pll_disabled(dev_priv, PIPE_C); - } - - I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) & - ~PHY_COM_LANE_RESET_DEASSERT(phy)); - - vlv_set_power_well(dev_priv, power_well, 
false); -} + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; + dev_priv->display.enable_fbc = i8xx_enable_fbc; + dev_priv->display.disable_fbc = i8xx_disable_fbc; -static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum pipe pipe = power_well->data; - bool enabled; - u32 state, ctrl; - - mutex_lock(&dev_priv->rps.hw_lock); - - state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. - */ - WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); - enabled = state == DP_SSS_PWR_ON(pipe); - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); - WARN_ON(ctrl << 16 != state); - - mutex_unlock(&dev_priv->rps.hw_lock); - - return enabled; -} - -static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - bool enable) -{ - enum pipe pipe = power_well->data; - u32 state; - u32 ctrl; - - state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); - - mutex_lock(&dev_priv->rps.hw_lock); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); - ctrl &= ~DP_SSC_MASK(pipe); - ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); - - if (wait_for(COND, 100)) - DRM_ERROR("timout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); - -#undef COND - -out: - mutex_unlock(&dev_priv->rps.hw_lock); -} - -static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0); -} - -static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - WARN_ON_ONCE(power_well->data != PIPE_A && - power_well->data != PIPE_B && - power_well->data != PIPE_C); - - chv_set_pipe_power_well(dev_priv, power_well, true); -} - -static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - WARN_ON_ONCE(power_well->data != PIPE_A && - power_well->data != PIPE_B && - power_well->data != PIPE_C); - - chv_set_pipe_power_well(dev_priv, power_well, false); -} - -static void check_power_well_state(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bool enabled = power_well->ops->is_enabled(dev_priv, power_well); - - if (power_well->always_on || !i915.disable_power_well) { - if (!enabled) - goto mismatch; - - return; + /* This value was pulled out of someone's hat */ + I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); } - if (enabled != (power_well->count > 0)) - goto mismatch; - - return; - -mismatch: - WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", - power_well->name, power_well->always_on, enabled, - power_well->count, i915.disable_power_well); -} - -void intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains; - struct i915_power_well *power_well; - int i; - - intel_runtime_pm_get(dev_priv); - - power_domains = 
&dev_priv->power_domains; - - mutex_lock(&power_domains->lock); - - for_each_power_well(i, power_well, BIT(domain), power_domains) { - if (!power_well->count++) { - DRM_DEBUG_KMS("enabling %s\n", power_well->name); - power_well->ops->enable(dev_priv, power_well); - power_well->hw_enabled = true; - } - - check_power_well_state(dev_priv, power_well); - } - - power_domains->domain_use_count[domain]++; - - mutex_unlock(&power_domains->lock); -} - -void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains; - struct i915_power_well *power_well; - int i; - - power_domains = &dev_priv->power_domains; - - mutex_lock(&power_domains->lock); - - WARN_ON(!power_domains->domain_use_count[domain]); - power_domains->domain_use_count[domain]--; - - for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { - WARN_ON(!power_well->count); - - if (!--power_well->count && i915.disable_power_well) { - DRM_DEBUG_KMS("disabling %s\n", power_well->name); - power_well->hw_enabled = false; - power_well->ops->disable(dev_priv, power_well); - } - - check_power_well_state(dev_priv, power_well); - } - - mutex_unlock(&power_domains->lock); - - intel_runtime_pm_put(dev_priv); -} - -static struct i915_power_domains *hsw_pwr; - -/* Display audio driver power well request */ -int i915_request_power_well(void) -{ - struct drm_i915_private *dev_priv; - - if (!hsw_pwr) - return -ENODEV; - - dev_priv = container_of(hsw_pwr, struct drm_i915_private, - power_domains); - intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); - return 0; -} -EXPORT_SYMBOL_GPL(i915_request_power_well); - -/* Display audio driver power well release */ -int i915_release_power_well(void) -{ - struct drm_i915_private *dev_priv; - - if (!hsw_pwr) - return -ENODEV; - - dev_priv = container_of(hsw_pwr, struct drm_i915_private, - power_domains); - intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); - return 0; -} -EXPORT_SYMBOL_GPL(i915_release_power_well); - -/* - * Private interface for the audio driver to get CDCLK in kHz. - * - * Caller must request power well using i915_request_power_well() prior to - * making the call. 
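Taken together, these three exports imply a calling convention on the audio side roughly like the following sketch; the snd-hda code is not part of this patch and the function name is hypothetical:

	static int example_hda_display_power_up(void)
	{
		int ret, cdclk;

		ret = i915_request_power_well();	/* -ENODEV if i915 is absent */
		if (ret)
			return ret;

		cdclk = i915_get_cdclk_freq();		/* in kHz, valid while the well is held */
		/* ... program the display audio hardware against cdclk ... */

		return i915_release_power_well();
	}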
- */ -int i915_get_cdclk_freq(void) -{ - struct drm_i915_private *dev_priv; - - if (!hsw_pwr) - return -ENODEV; - - dev_priv = container_of(hsw_pwr, struct drm_i915_private, - power_domains); - - return intel_ddi_get_cdclk_freq(dev_priv); -} -EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); - - -#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) - -#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PIPE_A) | \ - BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ - BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_CRT) | \ - BIT(POWER_DOMAIN_PLLS) | \ - BIT(POWER_DOMAIN_INIT)) -#define HSW_DISPLAY_POWER_DOMAINS ( \ - (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ - BIT(POWER_DOMAIN_INIT)) - -#define BDW_ALWAYS_ON_POWER_DOMAINS ( \ - HSW_ALWAYS_ON_POWER_DOMAINS | \ - BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) -#define BDW_DISPLAY_POWER_DOMAINS ( \ - (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \ - BIT(POWER_DOMAIN_INIT)) - -#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT) -#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK - -#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_CRT) | \ - BIT(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -#define CHV_PIPE_A_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PIPE_A) | \ - BIT(POWER_DOMAIN_INIT)) - -#define CHV_PIPE_B_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PIPE_B) | \ - BIT(POWER_DOMAIN_INIT)) - -#define CHV_PIPE_C_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PIPE_C) | \ - BIT(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ - BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ - BIT(POWER_DOMAIN_INIT)) - -static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { - .sync_hw = i9xx_always_on_power_well_noop, - .enable = i9xx_always_on_power_well_noop, - .disable = i9xx_always_on_power_well_noop, - .is_enabled = i9xx_always_on_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_pipe_power_well_ops = { - .sync_hw = 
chv_pipe_power_well_sync_hw, - .enable = chv_pipe_power_well_enable, - .disable = chv_pipe_power_well_disable, - .is_enabled = chv_pipe_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { - .sync_hw = vlv_power_well_sync_hw, - .enable = chv_dpio_cmn_power_well_enable, - .disable = chv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static struct i915_power_well i9xx_always_on_power_well[] = { - { - .name = "always-on", - .always_on = 1, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - }, -}; - -static const struct i915_power_well_ops hsw_power_well_ops = { - .sync_hw = hsw_power_well_sync_hw, - .enable = hsw_power_well_enable, - .disable = hsw_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static struct i915_power_well hsw_power_wells[] = { - { - .name = "always-on", - .always_on = 1, - .domains = HSW_ALWAYS_ON_POWER_DOMAINS, - .ops = &i9xx_always_on_power_well_ops, - }, - { - .name = "display", - .domains = HSW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - }, -}; - -static struct i915_power_well bdw_power_wells[] = { - { - .name = "always-on", - .always_on = 1, - .domains = BDW_ALWAYS_ON_POWER_DOMAINS, - .ops = &i9xx_always_on_power_well_ops, - }, - { - .name = "display", - .domains = BDW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - }, -}; - -static const struct i915_power_well_ops vlv_display_power_well_ops = { - .sync_hw = vlv_power_well_sync_hw, - .enable = vlv_display_power_well_enable, - .disable = vlv_display_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { - .sync_hw = vlv_power_well_sync_hw, - .enable = vlv_dpio_cmn_power_well_enable, - .disable = vlv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_power_well_ops = { - .sync_hw = vlv_power_well_sync_hw, - .enable = vlv_power_well_enable, - .disable = vlv_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static struct i915_power_well vlv_power_wells[] = { - { - .name = "always-on", - .always_on = 1, - .domains = VLV_ALWAYS_ON_POWER_DOMAINS, - .ops = &i9xx_always_on_power_well_ops, - }, - { - .name = "display", - .domains = VLV_DISPLAY_POWER_DOMAINS, - .data = PUNIT_POWER_WELL_DISP2D, - .ops = &vlv_display_power_well_ops, - }, - { - .name = "dpio-tx-b-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, - }, - { - .name = "dpio-tx-b-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, - }, - { - .name = "dpio-tx-c-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, - }, - { - .name = "dpio-tx-c-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = 
&vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, - }, - { - .name = "dpio-common", - .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, - .data = PUNIT_POWER_WELL_DPIO_CMN_BC, - .ops = &vlv_dpio_cmn_power_well_ops, - }, -}; - -static struct i915_power_well chv_power_wells[] = { - { - .name = "always-on", - .always_on = 1, - .domains = VLV_ALWAYS_ON_POWER_DOMAINS, - .ops = &i9xx_always_on_power_well_ops, - }, -#if 0 - { - .name = "display", - .domains = VLV_DISPLAY_POWER_DOMAINS, - .data = PUNIT_POWER_WELL_DISP2D, - .ops = &vlv_display_power_well_ops, - }, - { - .name = "pipe-a", - .domains = CHV_PIPE_A_POWER_DOMAINS, - .data = PIPE_A, - .ops = &chv_pipe_power_well_ops, - }, - { - .name = "pipe-b", - .domains = CHV_PIPE_B_POWER_DOMAINS, - .data = PIPE_B, - .ops = &chv_pipe_power_well_ops, - }, - { - .name = "pipe-c", - .domains = CHV_PIPE_C_POWER_DOMAINS, - .data = PIPE_C, - .ops = &chv_pipe_power_well_ops, - }, -#endif - { - .name = "dpio-common-bc", - /* - * XXX: cmnreset for one PHY seems to disturb the other. - * As a workaround keep both powered on at the same - * time for now. - */ - .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, - .data = PUNIT_POWER_WELL_DPIO_CMN_BC, - .ops = &chv_dpio_cmn_power_well_ops, - }, - { - .name = "dpio-common-d", - /* - * XXX: cmnreset for one PHY seems to disturb the other. - * As a workaround keep both powered on at the same - * time for now. - */ - .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, - .data = PUNIT_POWER_WELL_DPIO_CMN_D, - .ops = &chv_dpio_cmn_power_well_ops, - }, -#if 0 - { - .name = "dpio-tx-b-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, - }, - { - .name = "dpio-tx-b-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, - }, - { - .name = "dpio-tx-c-01", - .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, - }, - { - .name = "dpio-tx-c-23", - .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, - }, - { - .name = "dpio-tx-d-01", - .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | - CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01, - }, - { - .name = "dpio-tx-d-23", - .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | - CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23, - }, -#endif -}; - -static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, - enum punit_power_well power_well_id) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *power_well; - int i; - - for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { - if (power_well->data == power_well_id) - return power_well; - } - - return NULL; -} - -#define set_power_wells(power_domains, __power_wells) ({ \ - (power_domains)->power_wells = (__power_wells); \ - (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ -}) - -int intel_power_domains_init(struct drm_i915_private *dev_priv) -{ - struct 
i915_power_domains *power_domains = &dev_priv->power_domains; - - mutex_init(&power_domains->lock); - - /* - * The enabling order will be from lower to higher indexed wells, - * the disabling order is reversed. - */ - if (IS_HASWELL(dev_priv->dev)) { - set_power_wells(power_domains, hsw_power_wells); - hsw_pwr = power_domains; - } else if (IS_BROADWELL(dev_priv->dev)) { - set_power_wells(power_domains, bdw_power_wells); - hsw_pwr = power_domains; - } else if (IS_CHERRYVIEW(dev_priv->dev)) { - set_power_wells(power_domains, chv_power_wells); - } else if (IS_VALLEYVIEW(dev_priv->dev)) { - set_power_wells(power_domains, vlv_power_wells); - } else { - set_power_wells(power_domains, i9xx_always_on_power_well); - } - - return 0; -} - -void intel_power_domains_remove(struct drm_i915_private *dev_priv) -{ - hsw_pwr = NULL; -} - -static void intel_power_domains_resume(struct drm_i915_private *dev_priv) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *power_well; - int i; - - mutex_lock(&power_domains->lock); - for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { - power_well->ops->sync_hw(dev_priv, power_well); - power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, - power_well); - } - mutex_unlock(&power_domains->lock); -} - -static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) -{ - struct i915_power_well *cmn = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); - struct i915_power_well *disp2d = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); - - /* nothing to do if common lane is already off */ - if (!cmn->ops->is_enabled(dev_priv, cmn)) - return; - - /* If the display might be already active skip this */ - if (disp2d->ops->is_enabled(dev_priv, disp2d) && - I915_READ(DPIO_CTL) & DPIO_CMNRST) - return; - - DRM_DEBUG_KMS("toggling display PHY side reset\n"); - - /* cmnlane needs DPLL registers */ - disp2d->ops->enable(dev_priv, disp2d); - - /* - * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: - * Need to assert and de-assert PHY SB reset by gating the - * common lane power, then un-gating it. - * Simply ungating isn't enough to reset the PHY enough to get - * ports and lanes running. - */ - cmn->ops->disable(dev_priv, cmn); -} - -void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct i915_power_domains *power_domains = &dev_priv->power_domains; - - power_domains->initializing = true; - - if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { - mutex_lock(&power_domains->lock); - vlv_cmnlane_wa(dev_priv); - mutex_unlock(&power_domains->lock); - } - - /* For now, we need the power well to be always enabled. 
*/ - intel_display_set_init_power(dev_priv, true); - intel_power_domains_resume(dev_priv); - power_domains->initializing = false; -} - -void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) -{ - intel_runtime_pm_get(dev_priv); -} - -void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) -{ - intel_runtime_pm_put(dev_priv); -} - -void intel_runtime_pm_get(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct device *device = &dev->pdev->dev; - - if (!HAS_RUNTIME_PM(dev)) - return; - - pm_runtime_get_sync(device); - WARN(dev_priv->pm.suspended, "Device still suspended.\n"); -} - -void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct device *device = &dev->pdev->dev; - - if (!HAS_RUNTIME_PM(dev)) - return; - - WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n"); - pm_runtime_get_noresume(device); -} - -void intel_runtime_pm_put(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct device *device = &dev->pdev->dev; - - if (!HAS_RUNTIME_PM(dev)) - return; - - pm_runtime_mark_last_busy(device); - pm_runtime_put_autosuspend(device); -} - -void intel_init_runtime_pm(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct device *device = &dev->pdev->dev; - - if (!HAS_RUNTIME_PM(dev)) - return; - - pm_runtime_set_active(device); - - /* - * RPM depends on RC6 to save restore the GT HW context, so make RC6 a - * requirement. - */ - if (!intel_enable_rc6(dev)) { - DRM_INFO("RC6 disabled, disabling runtime PM support\n"); - return; - } - - pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ - pm_runtime_mark_last_busy(device); - pm_runtime_use_autosuspend(device); - - pm_runtime_put_autosuspend(device); -} - -void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct device *device = &dev->pdev->dev; - - if (!HAS_RUNTIME_PM(dev)) - return; - - if (!intel_enable_rc6(dev)) - return; - - /* Make sure we're not suspended first. 
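These get/put helpers are intended to bracket any code that may touch the hardware while the device could be runtime suspended; a rough sketch of a caller (illustrative, not code from this patch):

	intel_runtime_pm_get(dev_priv);		/* resumes the device if it was suspended */
	/* ... registers may be accessed safely in here ... */
	intel_runtime_pm_put(dev_priv);		/* re-arms the autosuspend timer */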
*/ - pm_runtime_get_sync(device); - pm_runtime_disable(device); + dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev); } /* Set up chip specific power management-related functions */ @@ -7203,28 +6117,7 @@ void intel_init_pm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (HAS_FBC(dev)) { - if (INTEL_INFO(dev)->gen >= 7) { - dev_priv->display.fbc_enabled = ironlake_fbc_enabled; - dev_priv->display.enable_fbc = gen7_enable_fbc; - dev_priv->display.disable_fbc = ironlake_disable_fbc; - } else if (INTEL_INFO(dev)->gen >= 5) { - dev_priv->display.fbc_enabled = ironlake_fbc_enabled; - dev_priv->display.enable_fbc = ironlake_enable_fbc; - dev_priv->display.disable_fbc = ironlake_disable_fbc; - } else if (IS_GM45(dev)) { - dev_priv->display.fbc_enabled = g4x_fbc_enabled; - dev_priv->display.enable_fbc = g4x_enable_fbc; - dev_priv->display.disable_fbc = g4x_disable_fbc; - } else { - dev_priv->display.fbc_enabled = i8xx_fbc_enabled; - dev_priv->display.enable_fbc = i8xx_enable_fbc; - dev_priv->display.disable_fbc = i8xx_disable_fbc; - - /* This value was pulled out of someone's hat */ - I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); - } - } + intel_init_fbc(dev_priv); /* For cxsr */ if (IS_PINEVIEW(dev)) @@ -7233,7 +6126,9 @@ void intel_init_pm(struct drm_device *dev) i915_ironlake_get_mem_freq(dev); /* For FIFO watermark updates */ - if (HAS_PCH_SPLIT(dev)) { + if (IS_GEN9(dev)) { + dev_priv->display.init_clock_gating = gen9_init_clock_gating; + } else if (HAS_PCH_SPLIT(dev)) { ilk_setup_wm_latency(dev); if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && @@ -7490,5 +6385,4 @@ void intel_pm_setup(struct drm_device *dev) intel_gen6_powersave_work); dev_priv->pm.suspended = false; - dev_priv->pm._irqs_disabled = false; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 0a80e419b589..a8f72e8d64e3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -665,76 +665,108 @@ err: return ret; } -static inline void intel_ring_emit_wa(struct intel_engine_cs *ring, - u32 addr, u32 value) +static int intel_ring_workarounds_emit(struct intel_engine_cs *ring) { + int ret, i; struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_workarounds *w = &dev_priv->workarounds; - if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS)) - return; + if (WARN_ON(w->count == 0)) + return 0; - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, addr); - intel_ring_emit(ring, value); + ring->gpu_caches_dirty = true; + ret = intel_ring_flush_all_caches(ring); + if (ret) + return ret; - dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr; - dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF; - /* value is updated with the status of remaining bits of this - * register when it is read from debugfs file - */ - dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value; - dev_priv->num_wa_regs++; + ret = intel_ring_begin(ring, (w->count * 2 + 2)); + if (ret) + return ret; - return; + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); + for (i = 0; i < w->count; i++) { + intel_ring_emit(ring, w->reg[i].addr); + intel_ring_emit(ring, w->reg[i].value); + } + intel_ring_emit(ring, MI_NOOP); + + intel_ring_advance(ring); + + ring->gpu_caches_dirty = true; + ret = intel_ring_flush_all_caches(ring); + if (ret) + return ret; + + DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", 
w->count); + + return 0; } +static int wa_add(struct drm_i915_private *dev_priv, + const u32 addr, const u32 val, const u32 mask) +{ + const u32 idx = dev_priv->workarounds.count; + + if (WARN_ON(idx >= I915_MAX_WA_REGS)) + return -ENOSPC; + + dev_priv->workarounds.reg[idx].addr = addr; + dev_priv->workarounds.reg[idx].value = val; + dev_priv->workarounds.reg[idx].mask = mask; + + dev_priv->workarounds.count++; + + return 0; +} + +#define WA_REG(addr, val, mask) { \ + const int r = wa_add(dev_priv, (addr), (val), (mask)); \ + if (r) \ + return r; \ + } + +#define WA_SET_BIT_MASKED(addr, mask) \ + WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff) + +#define WA_CLR_BIT_MASKED(addr, mask) \ + WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff) + +#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask) +#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask) + +#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff) + static int bdw_init_workarounds(struct intel_engine_cs *ring) { - int ret; struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - /* - * workarounds applied in this fn are part of register state context, - * they need to be re-initialized followed by gpu reset, suspend/resume, - * module reload. - */ - dev_priv->num_wa_regs = 0; - memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs)); - - /* - * update the number of dwords required based on the - * actual number of workarounds applied - */ - ret = intel_ring_begin(ring, 18); - if (ret) - return ret; - /* WaDisablePartialInstShootdown:bdw */ - /* WaDisableThreadStallDopClockGating:bdw */ - /* FIXME: Unclear whether we really need this on production bdw. */ - intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, - _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE - | STALL_DOP_GATING_DISABLE)); + /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE | + STALL_DOP_GATING_DISABLE); - /* WaDisableDopClockGating:bdw May not be needed for production */ - intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); + /* WaDisableDopClockGating:bdw */ + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + DOP_CLOCK_GATING_DISABLE); - intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, - _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + GEN8_SAMPLER_POWER_BYPASS_DIS); /* Use Force Non-Coherent whenever executing a 3D context. This is a * workaround for for a possible hang in the unlikely event a TLB * invalidation occurs during a PSD flush. */ - intel_ring_emit_wa(ring, HDC_CHICKEN0, - _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT)); + /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_NON_COHERENT | + (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); /* Wa4x4STCOptimizationDisable:bdw */ - intel_ring_emit_wa(ring, CACHE_MODE_1, - _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); + WA_SET_BIT_MASKED(CACHE_MODE_1, + GEN8_4x4_STC_OPTIMIZATION_DISABLE); /* * BSpec recommends 8x4 when MSAA is used, @@ -744,52 +776,50 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring) * disable bit, which we don't touch here, but it's good * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
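The infrastructure above is a record/replay table: wa_add() and the WA_* macros record (addr, value, mask) triples into dev_priv->workarounds once at init time, and intel_ring_workarounds_emit() later replays the whole table from the ring with a single MI_LOAD_REGISTER_IMM after every reset. A hypothetical platform hook built only from the helpers added here might look like this sketch:

	static int xyz_init_workarounds(struct intel_engine_cs *ring)
	{
		/* needed in scope by the WA_* macros */
		struct drm_i915_private *dev_priv = ring->dev->dev_private;

		/* records the triple; nothing is written to the GPU here */
		WA_SET_BIT_MASKED(CACHE_MODE_1,
				  GEN8_4x4_STC_OPTIMIZATION_DISABLE);

		return 0;
	}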
*/ - intel_ring_emit_wa(ring, GEN7_GT_MODE, - GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); - - intel_ring_advance(ring); - - DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n", - dev_priv->num_wa_regs); + WA_SET_BIT_MASKED(GEN7_GT_MODE, + GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); return 0; } static int chv_init_workarounds(struct intel_engine_cs *ring) { - int ret; struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - /* - * workarounds applied in this fn are part of register state context, - * they need to be re-initialized followed by gpu reset, suspend/resume, - * module reload. - */ - dev_priv->num_wa_regs = 0; - memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs)); - - ret = intel_ring_begin(ring, 12); - if (ret) - return ret; - /* WaDisablePartialInstShootdown:chv */ - intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, - _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE)); + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); /* WaDisableThreadStallDopClockGating:chv */ - intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, - _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE)); + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + STALL_DOP_GATING_DISABLE); /* WaDisableDopClockGating:chv (pre-production hw) */ - intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + DOP_CLOCK_GATING_DISABLE); /* WaDisableSamplerPowerBypass:chv (pre-production hw) */ - intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, - _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + GEN8_SAMPLER_POWER_BYPASS_DIS); - intel_ring_advance(ring); + return 0; +} + +static int init_workarounds_ring(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN_ON(ring->id != RCS); + + dev_priv->workarounds.count = 0; + + if (IS_BROADWELL(dev)) + return bdw_init_workarounds(ring); + + if (IS_CHERRYVIEW(dev)) + return chv_init_workarounds(ring); return 0; } @@ -812,7 +842,7 @@ static int init_render_ring(struct intel_engine_cs *ring) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv */ - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ @@ -849,7 +879,7 @@ static int init_render_ring(struct intel_engine_cs *ring) if (HAS_L3_DPF(dev)) I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); - return ret; + return init_workarounds_ring(ring); } static void render_ring_cleanup(struct intel_engine_cs *ring) @@ -1186,7 +1216,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1217,7 +1247,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) + if (!intel_irqs_enabled(dev_priv)) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1254,7 +1284,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) + if (!intel_irqs_enabled(dev_priv)) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ 
-1388,8 +1418,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) - return false; + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { @@ -1431,7 +1461,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1451,9 +1481,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) - return; - spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { I915_WRITE_IMR(ring, ~0); @@ -1469,7 +1496,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; - if (!dev->irq_enabled) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -2229,6 +2256,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, u32 invalidate, u32 flush) { struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; uint32_t cmd; int ret; @@ -2259,8 +2287,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, } intel_ring_advance(ring); - if (IS_GEN7(dev) && !invalidate && flush) - return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); + if (!invalidate && flush) { + if (IS_GEN7(dev)) + return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); + else if (IS_BROADWELL(dev)) + dev_priv->fbc.need_sw_cache_clean = true; + } return 0; } @@ -2293,10 +2325,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev) dev_priv->semaphore_obj = obj; } } - if (IS_CHERRYVIEW(dev)) - ring->init_context = chv_init_workarounds; - else - ring->init_context = bdw_init_workarounds; + + ring->init_context = intel_ring_workarounds_emit; ring->add_request = gen6_add_request; ring->flush = gen8_render_ring_flush; ring->irq_get = gen8_ring_get_irq; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c new file mode 100644 index 000000000000..39c33e0a753c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -0,0 +1,1375 @@ +/* + * Copyright © 2012-2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eugeni Dodonov <eugeni.dodonov@intel.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + */ + +#include <linux/pm_runtime.h> +#include <linux/vgaarb.h> + +#include "i915_drv.h" +#include "intel_drv.h" +#include <drm/i915_powerwell.h> + +/** + * DOC: runtime pm + * + * The i915 driver supports dynamic enabling and disabling of entire hardware + * blocks at runtime. This is especially important on the display side where + * software is supposed to control many power gates manually on recent hardware, + * since on the GT side a lot of the power management is done by the hardware. + * But even there some manual control at the device level is required. + * + * Since i915 supports a diverse set of platforms with a unified codebase and + * hardware engineers just love to shuffle functionality around between power + * domains there's a sizeable amount of indirection required. This file provides + * generic functions to the driver for grabbing and releasing references for + * abstract power domains. It then maps those to the actual power wells + * present for a given platform. + */ + +static struct i915_power_domains *hsw_pwr; + +#define for_each_power_well(i, power_well, domain_mask, power_domains) \ + for (i = 0; \ + i < (power_domains)->power_well_count && \ + ((power_well) = &(power_domains)->power_wells[i]); \ + i++) \ + if ((power_well)->domains & (domain_mask)) + +#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \ + for (i = (power_domains)->power_well_count - 1; \ + i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\ + i--) \ + if ((power_well)->domains & (domain_mask)) + +/* + * We should only use the power well if we explicitly asked the hardware to + * enable it, so check if it's enabled and also check if we've requested it to + * be enabled. + */ +static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return I915_READ(HSW_PWR_WELL_DRIVER) == + (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); +} + +/** + * __intel_display_power_is_enabled - unlocked check for a power domain + * @dev_priv: i915 device instance + * @domain: power domain to check + * + * This is the unlocked version of intel_display_power_is_enabled() and should + * only be used from error capture and recovery code where deadlocks are + * possible. + * + * Returns: + * True when the power domain is enabled, false otherwise. + */ +bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains; + struct i915_power_well *power_well; + bool is_enabled; + int i; + + if (dev_priv->pm.suspended) + return false; + + power_domains = &dev_priv->power_domains; + + is_enabled = true; + + for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { + if (power_well->always_on) + continue; + + if (!power_well->hw_enabled) { + is_enabled = false; + break; + } + } + + return is_enabled; +} + +/** + * intel_display_power_is_enabled - check for a power domain + * @dev_priv: i915 device instance + * @domain: power domain to check + * + * This function can be used to check the hw power domain state.
It is mostly + * used in hardware state readout functions. Everywhere else code should rely + * upon explicit power domain reference counting to ensure that the hardware + * block is powered up before accessing it. + * + * Callers must hold the relevant modesetting locks to ensure that concurrent + * threads can't disable the power well while the caller tries to read a few + * registers. + * + * Returns: + * True when the power domain is enabled, false otherwise. + */ +bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains; + bool ret; + + power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + ret = __intel_display_power_is_enabled(dev_priv, domain); + mutex_unlock(&power_domains->lock); + + return ret; +} + +/** + * intel_display_set_init_power - set the initial power domain state + * @dev_priv: i915 device instance + * @enable: whether to enable or disable the initial power domain state + * + * For simplicity our driver load/unload and system suspend/resume code assumes + * that all power domains are always enabled. This function controls the state + * of this little hack. While the initial power domain state is enabled, runtime + * pm is effectively disabled. + */ +void intel_display_set_init_power(struct drm_i915_private *dev_priv, + bool enable) +{ + if (dev_priv->power_domains.init_power_on == enable) + return; + + if (enable) + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + else + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + dev_priv->power_domains.init_power_on = enable; +} + +/* + * Starting with Haswell, we have a "Power Down Well" that can be turned off + * when not needed anymore. We have 4 registers that can request the power well + * to be enabled, and it will only be disabled if none of the registers is + * requesting it to be enabled. + */ +static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + + /* + * After we re-enable the power well, if we touch VGA register 0x3d5 + * we'll get unclaimed register interrupts. This stops after we write + * anything to the VGA MSR register. The vgacon module uses this + * register all the time, so if we unbind our driver and, as a + * consequence, bind vgacon, we'll get stuck in an infinite loop at + * console_unlock(). So here we touch the VGA MSR register, making + * sure vgacon can keep working normally without triggering interrupts + * and error messages.
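+ *
+ * (The VGA MSR is read and written through two different I/O ports --
+ * VGA_MSR_READ vs VGA_MSR_WRITE -- so the outb(inb(...), ...) pair
+ * below simply writes the current value back unchanged.)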
+ */ + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); + outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); + + if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) + gen8_irq_power_well_post_enable(dev_priv); +} + +static void hsw_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) +{ + bool is_enabled, enable_requested; + uint32_t tmp; + + tmp = I915_READ(HSW_PWR_WELL_DRIVER); + is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; + enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; + + if (enable) { + if (!enable_requested) + I915_WRITE(HSW_PWR_WELL_DRIVER, + HSW_PWR_WELL_ENABLE_REQUEST); + + if (!is_enabled) { + DRM_DEBUG_KMS("Enabling power well\n"); + if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & + HSW_PWR_WELL_STATE_ENABLED), 20)) + DRM_ERROR("Timeout enabling power well\n"); + hsw_power_well_post_enable(dev_priv); + } + + } else { + if (enable_requested) { + I915_WRITE(HSW_PWR_WELL_DRIVER, 0); + POSTING_READ(HSW_PWR_WELL_DRIVER); + DRM_DEBUG_KMS("Requesting to disable the power well\n"); + } + } +} + +static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + hsw_set_power_well(dev_priv, power_well, power_well->count > 0); + + /* + * We're taking over the BIOS, so clear any requests made by it since + * the driver is in charge now. + */ + if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) + I915_WRITE(HSW_PWR_WELL_BIOS, 0); +} + +static void hsw_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + hsw_set_power_well(dev_priv, power_well, true); +} + +static void hsw_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + hsw_set_power_well(dev_priv, power_well, false); +} + +static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return true; +} + +static void vlv_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) +{ + enum punit_power_well power_well_id = power_well->data; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(power_well_id); + state = enable ? 
PUNIT_PWRGT_PWR_ON(power_well_id) : + PUNIT_PWRGT_PWR_GATE(power_well_id); + + mutex_lock(&dev_priv->rps.hw_lock); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); + ctrl &= ~mask; + ctrl |= state; + vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); + + if (wait_for(COND, 100)) + DRM_ERROR("timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); + +#undef COND + +out: + mutex_unlock(&dev_priv->rps.hw_lock); +} + +static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, power_well->count > 0); +} + +static void vlv_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); +} + +static void vlv_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, false); +} + +static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int power_well_id = power_well->data; + bool enabled = false; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(power_well_id); + ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); + + mutex_lock(&dev_priv->rps.hw_lock); + + state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) && + state != PUNIT_PWRGT_PWR_GATE(power_well_id)); + if (state == ctrl) + enabled = true; + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; + WARN_ON(ctrl != state); + + mutex_unlock(&dev_priv->rps.hw_lock); + + return enabled; +} + +static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); + + vlv_set_power_well(dev_priv, power_well, true); + + spin_lock_irq(&dev_priv->irq_lock); + valleyview_enable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* + * During driver initialization/resume we can avoid restoring the + * part of the HW/SW state that will be inited anyway explicitly. + */ + if (dev_priv->power_domains.initializing) + return; + + intel_hpd_init(dev_priv); + + i915_redisable_vga_power_on(dev_priv->dev); +} + +static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); + + spin_lock_irq(&dev_priv->irq_lock); + valleyview_disable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + vlv_set_power_well(dev_priv, power_well, false); + + vlv_power_sequencer_reset(dev_priv); +} + +static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection.
+ */ + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + + vlv_set_power_well(dev_priv, power_well, true); + + /* + * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - + * 6. De-assert cmn_reset/side_reset. Same as VLV X0. + * a. GUnit 0x2110 bit[0] set to 1 (def 0) + * b. The other bits such as sfr settings / modesel may all + * be set to 0. + * + * This should only be done on init and resume from S3 with + * both PLLs disabled, or we risk losing DPIO and PLL + * synchronization. + */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); +} + +static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); + + for_each_pipe(dev_priv, pipe) + assert_pll_disabled(dev_priv, pipe); + + /* Assert common reset */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); + + vlv_set_power_well(dev_priv, power_well, false); +} + +static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && + power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection. + */ + if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { + phy = DPIO_PHY0; + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | + DPLL_REFA_CLK_ENABLE_VLV); + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); + } else { + phy = DPIO_PHY1; + I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); + } + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + vlv_set_power_well(dev_priv, power_well, true); + + /* Poll for phypwrgood signal */ + if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) + DRM_ERROR("Display PHY %d is not powered up\n", phy); + + I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) | + PHY_COM_LANE_RESET_DEASSERT(phy)); +} + +static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && + power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); + + if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { + phy = DPIO_PHY0; + assert_pll_disabled(dev_priv, PIPE_A); + assert_pll_disabled(dev_priv, PIPE_B); + } else { + phy = DPIO_PHY1; + assert_pll_disabled(dev_priv, PIPE_C); + } + + I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) & + ~PHY_COM_LANE_RESET_DEASSERT(phy)); + + vlv_set_power_well(dev_priv, power_well, false); +} + +static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe = power_well->data; + bool enabled; + u32 state, ctrl; + + mutex_lock(&dev_priv->rps.hw_lock); + + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected.
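+ * (As the consistency check further down implies, the DP_SSC request
+ * bits sit 16 bits below the corresponding DP_SSS status bits in
+ * PUNIT_REG_DSPFREQ, hence the "ctrl << 16 != state" comparison.)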
+ */ + WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); + enabled = state == DP_SSS_PWR_ON(pipe); + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); + WARN_ON(ctrl << 16 != state); + + mutex_unlock(&dev_priv->rps.hw_lock); + + return enabled; +} + +static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + bool enable) +{ + enum pipe pipe = power_well->data; + u32 state; + u32 ctrl; + + state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); + + mutex_lock(&dev_priv->rps.hw_lock); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + ctrl &= ~DP_SSC_MASK(pipe); + ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); + vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); + + if (wait_for(COND, 100)) + DRM_ERROR("timout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); + +#undef COND + +out: + mutex_unlock(&dev_priv->rps.hw_lock); +} + +static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0); +} + +static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PIPE_A && + power_well->data != PIPE_B && + power_well->data != PIPE_C); + + chv_set_pipe_power_well(dev_priv, power_well, true); +} + +static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PIPE_A && + power_well->data != PIPE_B && + power_well->data != PIPE_C); + + chv_set_pipe_power_well(dev_priv, power_well, false); +} + +static void check_power_well_state(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bool enabled = power_well->ops->is_enabled(dev_priv, power_well); + + if (power_well->always_on || !i915.disable_power_well) { + if (!enabled) + goto mismatch; + + return; + } + + if (enabled != (power_well->count > 0)) + goto mismatch; + + return; + +mismatch: + WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", + power_well->name, power_well->always_on, enabled, + power_well->count, i915.disable_power_well); +} + +/** + * intel_display_power_get - grab a power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function grabs a power domain reference for @domain and ensures that the + * power domain and all its parents are powered up. Therefore users should only + * grab a reference to the innermost power domain they need. + * + * Any power domain reference obtained by this function must have a symmetric + * call to intel_display_power_put() to release the reference again. 
+ */ +void intel_display_power_get(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains; + struct i915_power_well *power_well; + int i; + + intel_runtime_pm_get(dev_priv); + + power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + + for_each_power_well(i, power_well, BIT(domain), power_domains) { + if (!power_well->count++) { + DRM_DEBUG_KMS("enabling %s\n", power_well->name); + power_well->ops->enable(dev_priv, power_well); + power_well->hw_enabled = true; + } + + check_power_well_state(dev_priv, power_well); + } + + power_domains->domain_use_count[domain]++; + + mutex_unlock(&power_domains->lock); +} + +/** + * intel_display_power_put - release a power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function drops the power domain reference obtained by + * intel_display_power_get() and might power down the corresponding hardware + * block right away if this is the last reference. + */ +void intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains; + struct i915_power_well *power_well; + int i; + + power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + + WARN_ON(!power_domains->domain_use_count[domain]); + power_domains->domain_use_count[domain]--; + + for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { + WARN_ON(!power_well->count); + + if (!--power_well->count && i915.disable_power_well) { + DRM_DEBUG_KMS("disabling %s\n", power_well->name); + power_well->hw_enabled = false; + power_well->ops->disable(dev_priv, power_well); + } + + check_power_well_state(dev_priv, power_well); + } + + mutex_unlock(&power_domains->lock); + + intel_runtime_pm_put(dev_priv); +} + +#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) + +#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_A) | \ + BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ + BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_CRT) | \ + BIT(POWER_DOMAIN_PLLS) | \ + BIT(POWER_DOMAIN_INIT)) +#define HSW_DISPLAY_POWER_DOMAINS ( \ + (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ + BIT(POWER_DOMAIN_INIT)) + +#define BDW_ALWAYS_ON_POWER_DOMAINS ( \ + HSW_ALWAYS_ON_POWER_DOMAINS | \ + BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) +#define BDW_DISPLAY_POWER_DOMAINS ( \ + (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT) +#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK + +#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_CRT) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define 
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_PIPE_A_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_A) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_PIPE_B_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_B) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_PIPE_C_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_C) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { + .sync_hw = i9xx_always_on_power_well_noop, + .enable = i9xx_always_on_power_well_noop, + .disable = i9xx_always_on_power_well_noop, + .is_enabled = i9xx_always_on_power_well_enabled, +}; + +static const struct i915_power_well_ops chv_pipe_power_well_ops = { + .sync_hw = chv_pipe_power_well_sync_hw, + .enable = chv_pipe_power_well_enable, + .disable = chv_pipe_power_well_disable, + .is_enabled = chv_pipe_power_well_enabled, +}; + +static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = chv_dpio_cmn_power_well_enable, + .disable = chv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static struct i915_power_well i9xx_always_on_power_well[] = { + { + .name = "always-on", + .always_on = 1, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + }, +}; + +static const struct i915_power_well_ops hsw_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = hsw_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static struct i915_power_well hsw_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = HSW_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, + }, + { + .name = "display", + .domains = HSW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + }, +}; + +static struct i915_power_well bdw_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = BDW_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, + }, + { + .name = "display", + .domains = BDW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + }, +}; + +static const struct i915_power_well_ops vlv_display_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = vlv_display_power_well_enable, + .disable = vlv_display_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = vlv_dpio_cmn_power_well_enable, + .disable = vlv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_ops 
vlv_dpio_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = vlv_power_well_enable, + .disable = vlv_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static struct i915_power_well vlv_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = VLV_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, + }, + { + .name = "display", + .domains = VLV_DISPLAY_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DISP2D, + .ops = &vlv_display_power_well_ops, + }, + { + .name = "dpio-tx-b-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, + }, + { + .name = "dpio-tx-b-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, + }, + { + .name = "dpio-tx-c-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, + }, + { + .name = "dpio-tx-c-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, + }, + { + .name = "dpio-common", + .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DPIO_CMN_BC, + .ops = &vlv_dpio_cmn_power_well_ops, + }, +}; + +static struct i915_power_well chv_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = VLV_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, + }, +#if 0 + { + .name = "display", + .domains = VLV_DISPLAY_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DISP2D, + .ops = &vlv_display_power_well_ops, + }, + { + .name = "pipe-a", + .domains = CHV_PIPE_A_POWER_DOMAINS, + .data = PIPE_A, + .ops = &chv_pipe_power_well_ops, + }, + { + .name = "pipe-b", + .domains = CHV_PIPE_B_POWER_DOMAINS, + .data = PIPE_B, + .ops = &chv_pipe_power_well_ops, + }, + { + .name = "pipe-c", + .domains = CHV_PIPE_C_POWER_DOMAINS, + .data = PIPE_C, + .ops = &chv_pipe_power_well_ops, + }, +#endif + { + .name = "dpio-common-bc", + /* + * XXX: cmnreset for one PHY seems to disturb the other. + * As a workaround keep both powered on at the same + * time for now. + */ + .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DPIO_CMN_BC, + .ops = &chv_dpio_cmn_power_well_ops, + }, + { + .name = "dpio-common-d", + /* + * XXX: cmnreset for one PHY seems to disturb the other. + * As a workaround keep both powered on at the same + * time for now. 
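+ * In practice this means that a reference on any of the BC or D
+ * domains currently powers on both PHYs.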
+ */ + .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DPIO_CMN_D, + .ops = &chv_dpio_cmn_power_well_ops, + }, +#if 0 + { + .name = "dpio-tx-b-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, + }, + { + .name = "dpio-tx-b-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, + }, + { + .name = "dpio-tx-c-01", + .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, + }, + { + .name = "dpio-tx-c-23", + .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, + }, + { + .name = "dpio-tx-d-01", + .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | + CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01, + }, + { + .name = "dpio-tx-d-23", + .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | + CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23, + }, +#endif +}; + +static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, + enum punit_power_well power_well_id) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + int i; + + for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { + if (power_well->data == power_well_id) + return power_well; + } + + return NULL; +} + +#define set_power_wells(power_domains, __power_wells) ({ \ + (power_domains)->power_wells = (__power_wells); \ + (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ +}) + +/** + * intel_power_domains_init - initializes the power domain structures + * @dev_priv: i915 device instance + * + * Initializes the power domain structures for @dev_priv depending upon the + * supported platform. + */ +int intel_power_domains_init(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + mutex_init(&power_domains->lock); + + /* + * The enabling order will be from lower to higher indexed wells, + * the disabling order is reversed. + */ + if (IS_HASWELL(dev_priv->dev)) { + set_power_wells(power_domains, hsw_power_wells); + hsw_pwr = power_domains; + } else if (IS_BROADWELL(dev_priv->dev)) { + set_power_wells(power_domains, bdw_power_wells); + hsw_pwr = power_domains; + } else if (IS_CHERRYVIEW(dev_priv->dev)) { + set_power_wells(power_domains, chv_power_wells); + } else if (IS_VALLEYVIEW(dev_priv->dev)) { + set_power_wells(power_domains, vlv_power_wells); + } else { + set_power_wells(power_domains, i9xx_always_on_power_well); + } + + return 0; +} + +static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct device *device = &dev->pdev->dev; + + if (!HAS_RUNTIME_PM(dev)) + return; + + if (!intel_enable_rc6(dev)) + return; + + /* Make sure we're not suspended first. 
*/ + pm_runtime_get_sync(device); + pm_runtime_disable(device); +} + +/** + * intel_power_domains_fini - finalizes the power domain structures + * @dev_priv: i915 device instance + * + * Finalizes the power domain structures for @dev_priv depending upon the + * supported platform. This function also disables runtime pm and ensures that + * the device stays powered up so that the driver can be reloaded. + */ +void intel_power_domains_fini(struct drm_i915_private *dev_priv) +{ + intel_runtime_pm_disable(dev_priv); + + /* The i915.ko module is still not prepared to be loaded when + * the power well is not enabled, so just enable it in case + * we're going to unload/reload. */ + intel_display_set_init_power(dev_priv, true); + + hsw_pwr = NULL; +} + +static void intel_power_domains_resume(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + int i; + + mutex_lock(&power_domains->lock); + for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { + power_well->ops->sync_hw(dev_priv, power_well); + power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, + power_well); + } + mutex_unlock(&power_domains->lock); +} + +static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn = + lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + struct i915_power_well *disp2d = + lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); + + /* nothing to do if common lane is already off */ + if (!cmn->ops->is_enabled(dev_priv, cmn)) + return; + + /* If the display might be already active skip this */ + if (disp2d->ops->is_enabled(dev_priv, disp2d) && + I915_READ(DPIO_CTL) & DPIO_CMNRST) + return; + + DRM_DEBUG_KMS("toggling display PHY side reset\n"); + + /* cmnlane needs DPLL registers */ + disp2d->ops->enable(dev_priv, disp2d); + + /* + * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: + * Need to assert and de-assert PHY SB reset by gating the + * common lane power, then un-gating it. + * Simply ungating isn't enough to reset the PHY enough to get + * ports and lanes running. + */ + cmn->ops->disable(dev_priv, cmn); +} + +/** + * intel_power_domains_init_hw - initialize hardware power domain state + * @dev_priv: i915 device instance + * + * This function initializes the hardware power domain state and enables all + * power domains using intel_display_set_init_power(). + */ +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + power_domains->initializing = true; + + if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + mutex_lock(&power_domains->lock); + vlv_cmnlane_wa(dev_priv); + mutex_unlock(&power_domains->lock); + } + + /* For now, we need the power well to be always enabled. */ + intel_display_set_init_power(dev_priv, true); + intel_power_domains_resume(dev_priv); + power_domains->initializing = false; +} + +/** + * intel_aux_display_runtime_get - grab an auxiliary power domain reference + * @dev_priv: i915 device instance + * + * This function grabs a power domain reference for the auxiliary power domain + * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its + * parents are powered up. Therefore users should only grab a reference to the + * innermost power domain they need.
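+ * (On the platforms handled here this currently resolves to a plain
+ * device-level runtime pm reference; see the one-line body below.)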
+ * + * Any power domain reference obtained by this function must have a symmetric + * call to intel_aux_display_runtime_put() to release the reference again. + */ +void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) +{ + intel_runtime_pm_get(dev_priv); +} + +/** + * intel_aux_display_runtime_put - release an auxiliary power domain reference + * @dev_priv: i915 device instance + * + * This function drops the auxiliary power domain reference obtained by + * intel_aux_display_runtime_get() and might power down the corresponding + * hardware block right away if this is the last reference. + */ +void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) +{ + intel_runtime_pm_put(dev_priv); +} + +/** + * intel_runtime_pm_get - grab a runtime pm reference + * @dev_priv: i915 device instance + * + * This function grabs a device-level runtime pm reference (mostly used for GEM + * code to ensure the GTT or GT is on) and ensures that it is powered up. + * + * Any runtime pm reference obtained by this function must have a symmetric + * call to intel_runtime_pm_put() to release the reference again. + */ +void intel_runtime_pm_get(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct device *device = &dev->pdev->dev; + + if (!HAS_RUNTIME_PM(dev)) + return; + + pm_runtime_get_sync(device); + WARN(dev_priv->pm.suspended, "Device still suspended.\n"); +} + +/** + * intel_runtime_pm_get_noresume - grab a runtime pm reference + * @dev_priv: i915 device instance + * + * This function grabs a device-level runtime pm reference (mostly used for GEM + * code to ensure the GTT or GT is on). + * + * It will _not_ power up the device but instead only check that it's powered + * on. Therefore it is only valid to call this function from contexts where + * the device is known to be powered up and where trying to power it up would + * result in hilarity and deadlocks. That pretty much means only the system + * suspend/resume code where this is used to grab runtime pm references for + * delayed setup down in work items. + * + * Any runtime pm reference obtained by this function must have a symmetric + * call to intel_runtime_pm_put() to release the reference again. + */ +void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct device *device = &dev->pdev->dev; + + if (!HAS_RUNTIME_PM(dev)) + return; + + WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n"); + pm_runtime_get_noresume(device); +} + +/** + * intel_runtime_pm_put - release a runtime pm reference + * @dev_priv: i915 device instance + * + * This function drops the device-level runtime pm reference obtained by + * intel_runtime_pm_get() and might power down the corresponding + * hardware block right away if this is the last reference. + */ +void intel_runtime_pm_put(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct device *device = &dev->pdev->dev; + + if (!HAS_RUNTIME_PM(dev)) + return; + + pm_runtime_mark_last_busy(device); + pm_runtime_put_autosuspend(device); +} + +/** + * intel_runtime_pm_enable - enable runtime pm + * @dev_priv: i915 device instance + * + * This function enables runtime pm at the end of the driver load sequence. + * + * Note that this function does currently not enable runtime pm for the + * subordinate display power domains. That is only done on the first modeset + * using intel_display_set_init_power().
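+ *
+ * Once runtime pm is enabled and RC6 is available, the device is
+ * autosuspended roughly ten seconds after the last runtime pm
+ * reference is dropped, per the autosuspend delay configured below.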
+ */ +void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct device *device = &dev->pdev->dev; + + if (!HAS_RUNTIME_PM(dev)) + return; + + pm_runtime_set_active(device); + + /* + * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a + * requirement. + */ + if (!intel_enable_rc6(dev)) { + DRM_INFO("RC6 disabled, disabling runtime PM support\n"); + return; + } + + pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ + pm_runtime_mark_last_busy(device); + pm_runtime_use_autosuspend(device); + + pm_runtime_put_autosuspend(device); +} + +/* Display audio driver power well request */ +int i915_request_power_well(void) +{ + struct drm_i915_private *dev_priv; + + if (!hsw_pwr) + return -ENODEV; + + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); + return 0; +} +EXPORT_SYMBOL_GPL(i915_request_power_well); + +/* Display audio driver power well release */ +int i915_release_power_well(void) +{ + struct drm_i915_private *dev_priv; + + if (!hsw_pwr) + return -ENODEV; + + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); + return 0; +} +EXPORT_SYMBOL_GPL(i915_release_power_well); + +/* + * Private interface for the audio driver to get CDCLK in kHz. + * + * Caller must request power well using i915_request_power_well() prior to + * making the call. + */ +int i915_get_cdclk_freq(void) +{ + struct drm_i915_private *dev_priv; + + if (!hsw_pwr) + return -ENODEV; + + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + + return intel_ddi_get_cdclk_freq(dev_priv); +} +EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9350edd6728d..6d7a277458b5 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1991,57 +1991,10 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) return !list_empty(&connector->probed_modes); } -static void -intel_sdvo_destroy_enhance_property(struct drm_connector *connector) -{ - struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - struct drm_device *dev = connector->dev; - - if (intel_sdvo_connector->left) - drm_property_destroy(dev, intel_sdvo_connector->left); - if (intel_sdvo_connector->right) - drm_property_destroy(dev, intel_sdvo_connector->right); - if (intel_sdvo_connector->top) - drm_property_destroy(dev, intel_sdvo_connector->top); - if (intel_sdvo_connector->bottom) - drm_property_destroy(dev, intel_sdvo_connector->bottom); - if (intel_sdvo_connector->hpos) - drm_property_destroy(dev, intel_sdvo_connector->hpos); - if (intel_sdvo_connector->vpos) - drm_property_destroy(dev, intel_sdvo_connector->vpos); - if (intel_sdvo_connector->saturation) - drm_property_destroy(dev, intel_sdvo_connector->saturation); - if (intel_sdvo_connector->contrast) - drm_property_destroy(dev, intel_sdvo_connector->contrast); - if (intel_sdvo_connector->hue) - drm_property_destroy(dev, intel_sdvo_connector->hue); - if (intel_sdvo_connector->sharpness) - drm_property_destroy(dev, intel_sdvo_connector->sharpness); - if (intel_sdvo_connector->flicker_filter) - drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); - if (intel_sdvo_connector->flicker_filter_2d) - drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); - if
(intel_sdvo_connector->flicker_filter_adaptive) - drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); - if (intel_sdvo_connector->tv_luma_filter) - drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); - if (intel_sdvo_connector->tv_chroma_filter) - drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); - if (intel_sdvo_connector->dot_crawl) - drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); - if (intel_sdvo_connector->brightness) - drm_property_destroy(dev, intel_sdvo_connector->brightness); -} - static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - if (intel_sdvo_connector->tv_format) - drm_property_destroy(connector->dev, - intel_sdvo_connector->tv_format); - - intel_sdvo_destroy_enhance_property(connector); drm_connector_cleanup(connector); kfree(intel_sdvo_connector); } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 07a74ef589bd..2c060addea29 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -139,6 +139,187 @@ static void intel_update_primary_plane(struct intel_crtc *crtc) } static void +skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t x, uint32_t y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = drm_plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(drm_plane); + const int pipe = intel_plane->pipe; + const int plane = intel_plane->plane + 1; + u32 plane_ctl, stride; + int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + + plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); + + /* Mask out pixel format bits in case we change it */ + plane_ctl &= ~PLANE_CTL_FORMAT_MASK; + plane_ctl &= ~PLANE_CTL_ORDER_RGBX; + plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK; + plane_ctl &= ~PLANE_CTL_TILED_MASK; + plane_ctl &= ~PLANE_CTL_ALPHA_MASK; + plane_ctl &= ~PLANE_CTL_ROTATE_MASK; + + /* Trickle feed has to be enabled */ + plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE; + + switch (fb->pixel_format) { + case DRM_FORMAT_RGB565: + plane_ctl |= PLANE_CTL_FORMAT_RGB_565; + break; + case DRM_FORMAT_XBGR8888: + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; + break; + case DRM_FORMAT_XRGB8888: + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; + break; + /* + * XXX: For ARGB/ABGR formats we default to expecting scanout buffers + * to be already pre-multiplied. We need to add a knob (or a different + * DRM_FORMAT) for user-space to configure that.
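+ *
+ * (Pre-multiplied means each colour channel is already scaled by the
+ * pixel's alpha, e.g. 50% opaque white stored as (0.5, 0.5, 0.5, 0.5)
+ * rather than (1.0, 1.0, 1.0, 0.5).)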
+ */ + case DRM_FORMAT_ABGR8888: + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | + PLANE_CTL_ORDER_RGBX | + PLANE_CTL_ALPHA_SW_PREMULTIPLY; + break; + case DRM_FORMAT_ARGB8888: + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | + PLANE_CTL_ALPHA_SW_PREMULTIPLY; + break; + case DRM_FORMAT_YUYV: + plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; + break; + case DRM_FORMAT_YVYU: + plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; + break; + case DRM_FORMAT_UYVY: + plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; + break; + case DRM_FORMAT_VYUY: + plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; + break; + default: + BUG(); + } + + switch (obj->tiling_mode) { + case I915_TILING_NONE: + stride = fb->pitches[0] >> 6; + break; + case I915_TILING_X: + plane_ctl |= PLANE_CTL_TILED_X; + stride = fb->pitches[0] >> 9; + break; + default: + BUG(); + } + if (intel_plane->rotation == BIT(DRM_ROTATE_180)) + plane_ctl |= PLANE_CTL_ROTATE_180; + + plane_ctl |= PLANE_CTL_ENABLE; + plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; + + intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h, + pixel_size, true, + src_w != crtc_w || src_h != crtc_h); + + /* Sizes are 0 based */ + src_w--; + src_h--; + crtc_w--; + crtc_h--; + + I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); + I915_WRITE(PLANE_STRIDE(pipe, plane), stride); + I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x); + I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w); + I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); + I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj)); + POSTING_READ(PLANE_SURF(pipe, plane)); +} + +static void +skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc) +{ + struct drm_device *dev = drm_plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(drm_plane); + const int pipe = intel_plane->pipe; + const int plane = intel_plane->plane + 1; + + I915_WRITE(PLANE_CTL(pipe, plane), + I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE); + + /* Activate double buffered register update */ + I915_WRITE(PLANE_CTL(pipe, plane), 0); + POSTING_READ(PLANE_CTL(pipe, plane)); + + intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false); +} + +static int +skl_update_colorkey(struct drm_plane *drm_plane, + struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = drm_plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(drm_plane); + const int pipe = intel_plane->pipe; + const int plane = intel_plane->plane; + u32 plane_ctl; + + I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); + I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); + I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask); + + plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); + plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK; + if (key->flags & I915_SET_COLORKEY_DESTINATION) + plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; + I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); + + POSTING_READ(PLANE_CTL(pipe, plane)); + + return 0; +} + +static void +skl_get_colorkey(struct drm_plane *drm_plane, + struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = drm_plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(drm_plane); + const int pipe = intel_plane->pipe; + const int plane 
= intel_plane->plane; + u32 plane_ctl; + + key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane)); + key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane)); + key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane)); + + plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); + + switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) { + case PLANE_CTL_KEY_ENABLE_DESTINATION: + key->flags = I915_SET_COLORKEY_DESTINATION; + break; + case PLANE_CTL_KEY_ENABLE_SOURCE: + key->flags = I915_SET_COLORKEY_SOURCE; + break; + default: + key->flags = I915_SET_COLORKEY_NONE; + } +} + +static void vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, @@ -845,57 +1026,24 @@ static bool colorkey_enabled(struct intel_plane *intel_plane) } static int -intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +intel_check_sprite_plane(struct drm_plane *plane, + struct intel_plane_state *state) { - struct drm_device *dev = plane->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc); struct intel_plane *intel_plane = to_intel_plane(plane); - enum pipe pipe = intel_crtc->pipe; + struct drm_framebuffer *fb = state->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; - struct drm_i915_gem_object *old_obj = intel_plane->obj; - int ret; - bool primary_enabled; - bool visible; + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + uint32_t src_x, src_y, src_w, src_h; + struct drm_rect *src = &state->src; + struct drm_rect *dst = &state->dst; + struct drm_rect *orig_src = &state->orig_src; + const struct drm_rect *clip = &state->clip; int hscale, vscale; int max_scale, min_scale; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - struct drm_rect src = { - /* sample coordinates in 16.16 fixed point */ - .x1 = src_x, - .x2 = src_x + src_w, - .y1 = src_y, - .y2 = src_y + src_h, - }; - struct drm_rect dst = { - /* integer pixels */ - .x1 = crtc_x, - .x2 = crtc_x + crtc_w, - .y1 = crtc_y, - .y2 = crtc_y + crtc_h, - }; - const struct drm_rect clip = { - .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, - .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, - }; - const struct { - int crtc_x, crtc_y; - unsigned int crtc_w, crtc_h; - uint32_t src_x, src_y, src_w, src_h; - } orig = { - .crtc_x = crtc_x, - .crtc_y = crtc_y, - .crtc_w = crtc_w, - .crtc_h = crtc_h, - .src_x = src_x, - .src_y = src_y, - .src_w = src_w, - .src_h = src_h, - }; /* Don't modify another pipe's plane */ if (intel_plane->pipe != intel_crtc->pipe) { @@ -927,55 +1075,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, max_scale = intel_plane->max_downscale << 16; min_scale = intel_plane->can_scale ? 
1 : (1 << 16); - drm_rect_rotate(&src, fb->width << 16, fb->height << 16, + drm_rect_rotate(src, fb->width << 16, fb->height << 16, intel_plane->rotation); - hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale); + hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale); BUG_ON(hscale < 0); - vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale); + vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale); BUG_ON(vscale < 0); - visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale); + state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale); - crtc_x = dst.x1; - crtc_y = dst.y1; - crtc_w = drm_rect_width(&dst); - crtc_h = drm_rect_height(&dst); + crtc_x = dst->x1; + crtc_y = dst->y1; + crtc_w = drm_rect_width(dst); + crtc_h = drm_rect_height(dst); - if (visible) { + if (state->visible) { /* check again in case clipping clamped the results */ - hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale); + hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); if (hscale < 0) { DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n"); - drm_rect_debug_print(&src, true); - drm_rect_debug_print(&dst, false); + drm_rect_debug_print(src, true); + drm_rect_debug_print(dst, false); return hscale; } - vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale); + vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (vscale < 0) { DRM_DEBUG_KMS("Vertical scaling factor out of limits\n"); - drm_rect_debug_print(&src, true); - drm_rect_debug_print(&dst, false); + drm_rect_debug_print(src, true); + drm_rect_debug_print(dst, false); return vscale; } /* Make the source viewport size an exact multiple of the scaling factors. */ - drm_rect_adjust_size(&src, - drm_rect_width(&dst) * hscale - drm_rect_width(&src), - drm_rect_height(&dst) * vscale - drm_rect_height(&src)); + drm_rect_adjust_size(src, + drm_rect_width(dst) * hscale - drm_rect_width(src), + drm_rect_height(dst) * vscale - drm_rect_height(src)); - drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16, + drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, intel_plane->rotation); /* sanity check to make sure the src viewport wasn't enlarged */ - WARN_ON(src.x1 < (int) src_x || - src.y1 < (int) src_y || - src.x2 > (int) (src_x + src_w) || - src.y2 > (int) (src_y + src_h)); + WARN_ON(src->x1 < (int) orig_src->x1 || + src->y1 < (int) orig_src->y1 || + src->x2 > (int) orig_src->x2 || + src->y2 > (int) orig_src->y2); /* * Hardware doesn't handle subpixel coordinates. @@ -983,10 +1131,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, * increase the source viewport size, because that could * push the downscaling factor out of bounds. 
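 *
 * (For example, with the 16.16 fixed-point coordinates used here,
 * src->x1 == 0x00018000 means 1.5 pixels and truncates to source
 * pixel 1 in the ">> 16" conversions below.)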
@@ -983,10 +1131,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		 * increase the source viewport size, because that could
 		 * push the downscaling factor out of bounds.
 		 */
-		src_x = src.x1 >> 16;
-		src_w = drm_rect_width(&src) >> 16;
-		src_y = src.y1 >> 16;
-		src_h = drm_rect_height(&src) >> 16;
+		src_x = src->x1 >> 16;
+		src_w = drm_rect_width(src) >> 16;
+		src_y = src->y1 >> 16;
+		src_h = drm_rect_height(src) >> 16;
 
 		if (format_is_yuv(fb->pixel_format)) {
 			src_x &= ~1;
@@ -1000,12 +1148,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 			crtc_w &= ~1;
 
 			if (crtc_w == 0)
-				visible = false;
+				state->visible = false;
 		}
 	}
 
 	/* Check size restrictions when scaling */
-	if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+	if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
 		unsigned int width_bytes;
 
 		WARN_ON(!intel_plane->can_scale);
@@ -1013,12 +1161,13 @@
 
 		/* FIXME interlacing min height is 6 */
 		if (crtc_w < 3 || crtc_h < 3)
-			visible = false;
+			state->visible = false;
 
 		if (src_w < 3 || src_h < 3)
-			visible = false;
+			state->visible = false;
 
-		width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+		width_bytes = ((src_x * pixel_size) & 63) +
+					src_w * pixel_size;
 
 		if (src_w > 2048 || src_h > 2048 ||
 		    width_bytes > 4096 || fb->pitches[0] > 4096) {
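
The format_is_yuv() branch in the hunks above masks coordinates down to even values because packed 4:2:2 formats such as YUYV share one chroma sample pair between two horizontally adjacent pixels, so the hardware can only fetch at macropixel (2-pixel) granularity. A tiny sketch of the same rounding, with a hypothetical fourcc check standing in for the driver's helper:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the driver's format_is_yuv() check. */
    static int is_packed_yuv422(uint32_t fourcc)
    {
            return fourcc == 0x56595559;    /* 'YUYV' little-endian fourcc */
    }

    /* Packed 4:2:2 stores one U/V pair per two pixels, so the source
     * window must start and end on a 2-pixel macropixel boundary. */
    static void align_src_for_yuv(uint32_t fourcc, uint32_t *x, uint32_t *w)
    {
            if (is_packed_yuv422(fourcc)) {
                    *x &= ~1u;      /* round start down to even */
                    *w &= ~1u;      /* round width down to even */
            }
    }

    int main(void)
    {
            uint32_t x = 101, w = 33;

            align_src_for_yuv(0x56595559, &x, &w);
            assert(x == 100 && w == 32);
            return 0;
    }
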
@@ -1027,42 +1176,76 @@
 		}
 	}
 
-	dst.x1 = crtc_x;
-	dst.x2 = crtc_x + crtc_w;
-	dst.y1 = crtc_y;
-	dst.y2 = crtc_y + crtc_h;
+	if (state->visible) {
+		src->x1 = src_x;
+		src->x2 = src_x + src_w;
+		src->y1 = src_y;
+		src->y2 = src_y + src_h;
+	}
+
+	dst->x1 = crtc_x;
+	dst->x2 = crtc_x + crtc_w;
+	dst->y1 = crtc_y;
+	dst->y2 = crtc_y + crtc_h;
+
+	return 0;
+}
+
+static int
+intel_commit_sprite_plane(struct drm_plane *plane,
+			  struct intel_plane_state *state)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_crtc *crtc = state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	enum pipe pipe = intel_crtc->pipe;
+	struct drm_framebuffer *fb = state->fb;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *old_obj = intel_plane->obj;
+	int crtc_x, crtc_y;
+	unsigned int crtc_w, crtc_h;
+	uint32_t src_x, src_y, src_w, src_h;
+	struct drm_rect *dst = &state->dst;
+	const struct drm_rect *clip = &state->clip;
+	bool primary_enabled;
+	int ret;
 
 	/*
 	 * If the sprite is completely covering the primary plane,
 	 * we can disable the primary and save power.
 	 */
-	primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
-	WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+	primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
+	WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
 
-	mutex_lock(&dev->struct_mutex);
-	/* Note that this will apply the VT-d workaround for scanouts,
-	 * which is more restrictive than required for sprites. (The
-	 * primary plane requires 256KiB alignment with 64 PTE padding,
-	 * the sprite planes only require 128KiB alignment and 32 PTE padding.
-	 */
-	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-
-	i915_gem_track_fb(old_obj, obj,
-			  INTEL_FRONTBUFFER_SPRITE(pipe));
-	mutex_unlock(&dev->struct_mutex);
+	if (old_obj != obj) {
+		mutex_lock(&dev->struct_mutex);
 
-	if (ret)
-		return ret;
+		/* Note that this will apply the VT-d workaround for scanouts,
+		 * which is more restrictive than required for sprites. (The
+		 * primary plane requires 256KiB alignment with 64 PTE padding,
+		 * the sprite planes only require 128KiB alignment and 32 PTE
+		 * padding.
+		 */
+		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+		if (ret == 0)
+			i915_gem_track_fb(old_obj, obj,
+					  INTEL_FRONTBUFFER_SPRITE(pipe));
+		mutex_unlock(&dev->struct_mutex);
+		if (ret)
+			return ret;
+	}
 
-	intel_plane->crtc_x = orig.crtc_x;
-	intel_plane->crtc_y = orig.crtc_y;
-	intel_plane->crtc_w = orig.crtc_w;
-	intel_plane->crtc_h = orig.crtc_h;
-	intel_plane->src_x = orig.src_x;
-	intel_plane->src_y = orig.src_y;
-	intel_plane->src_w = orig.src_w;
-	intel_plane->src_h = orig.src_h;
+	intel_plane->crtc_x = state->orig_dst.x1;
+	intel_plane->crtc_y = state->orig_dst.y1;
+	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+	intel_plane->src_x = state->orig_src.x1;
+	intel_plane->src_y = state->orig_src.y1;
+	intel_plane->src_w = drm_rect_width(&state->orig_src);
+	intel_plane->src_h = drm_rect_height(&state->orig_src);
 	intel_plane->obj = obj;
 
 	if (intel_crtc->active) {
@@ -1076,12 +1259,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		if (primary_was_enabled && !primary_enabled)
 			intel_pre_disable_primary(crtc);
 
-		if (visible)
+		if (state->visible) {
+			crtc_x = state->dst.x1;
+			crtc_y = state->dst.y1;
+			crtc_w = drm_rect_width(&state->dst);
+			crtc_h = drm_rect_height(&state->dst);
+			src_x = state->src.x1;
+			src_y = state->src.y1;
+			src_w = drm_rect_width(&state->src);
+			src_h = drm_rect_height(&state->src);
 			intel_plane->update_plane(plane, crtc, fb, obj,
 						  crtc_x, crtc_y, crtc_w, crtc_h,
 						  src_x, src_y, src_w, src_h);
-		else
+		} else {
 			intel_plane->disable_plane(plane, crtc);
+		}
+
 		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
 
@@ -1090,14 +1283,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	}
 
 	/* Unpin old obj after new one is active to avoid ugliness */
-	if (old_obj) {
+	if (old_obj && old_obj != obj) {
+
 		/*
 		 * It's fairly common to simply update the position of
 		 * an existing object.  In that case, we don't need to
 		 * wait for vblank to avoid ugliness, we only need to
 		 * do the pin & ref bookkeeping.
 		 */
-		if (old_obj != obj && intel_crtc->active)
+		if (intel_crtc->active)
 			intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 		mutex_lock(&dev->struct_mutex);
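
The commit-phase hunks above establish a buffer-swap discipline worth spelling out: pin and start frontbuffer tracking on the new object only when it differs from the old one, program the hardware, and unpin the old object only after the new one is actually scanning out (waiting for vblank when the pipe is active). A generic sketch of that ordering with hypothetical pin/unpin helpers, not the GEM API:

    #include <assert.h>
    #include <stddef.h>

    struct buf { int pinned; };

    static int pin(struct buf *b)     { b->pinned++; return 0; }
    static void unpin(struct buf *b)  { b->pinned--; }
    static void program_scanout(struct buf *b) { (void)b; }
    static void wait_for_vblank(void) { }

    /* Swap the scanout buffer without ever leaving the hardware pointed
     * at an unpinned buffer; 'old' may equal 'new' on a pure move. */
    static int swap_scanout(struct buf *old, struct buf *new)
    {
            if (old != new) {
                    int ret = pin(new);     /* make the new buffer usable first */
                    if (ret)
                            return ret;
            }

            program_scanout(new);

            if (old && old != new) {
                    wait_for_vblank();      /* hw may still be scanning 'old' */
                    unpin(old);             /* only now is it safe to release */
            }
            return 0;
    }

    int main(void)
    {
            struct buf a = { 1 }, b = { 0 };

            swap_scanout(&a, &b);           /* buffer change: pin b, unpin a */
            assert(a.pinned == 0 && b.pinned == 1);
            swap_scanout(&b, &b);           /* pure move: no pin traffic */
            assert(b.pinned == 1);
            return 0;
    }
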
@@ -1109,6 +1303,46 @@
 }
 
 static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		   unsigned int crtc_w, unsigned int crtc_h,
+		   uint32_t src_x, uint32_t src_y,
+		   uint32_t src_w, uint32_t src_h)
+{
+	struct intel_plane_state state;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int ret;
+
+	state.crtc = crtc;
+	state.fb = fb;
+
+	/* sample coordinates in 16.16 fixed point */
+	state.src.x1 = src_x;
+	state.src.x2 = src_x + src_w;
+	state.src.y1 = src_y;
+	state.src.y2 = src_y + src_h;
+
+	/* integer pixels */
+	state.dst.x1 = crtc_x;
+	state.dst.x2 = crtc_x + crtc_w;
+	state.dst.y1 = crtc_y;
+	state.dst.y2 = crtc_y + crtc_h;
+
+	state.clip.x1 = 0;
+	state.clip.y1 = 0;
+	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+	state.orig_src = state.src;
+	state.orig_dst = state.dst;
+
+	ret = intel_check_sprite_plane(plane, &state);
+	if (ret)
+		return ret;
+
+	return intel_commit_sprite_plane(plane, &state);
+}
+
+static int
 intel_disable_plane(struct drm_plane *plane)
 {
 	struct drm_device *dev = plane->dev;
@@ -1305,6 +1539,18 @@ static uint32_t vlv_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
+static uint32_t skl_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
 int
 intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 {
@@ -1368,7 +1614,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
 		}
 		break;
-
+	case 9:
+		/*
+		 * FIXME: Skylake planes can be scaled (with some restrictions),
+		 * but this is for another time.
+		 */
+		intel_plane->can_scale = false;
+		intel_plane->max_downscale = 1;
+		intel_plane->update_plane = skl_update_plane;
+		intel_plane->disable_plane = skl_disable_plane;
+		intel_plane->update_colorkey = skl_update_colorkey;
+		intel_plane->get_colorkey = skl_get_colorkey;
+
+		plane_formats = skl_plane_formats;
+		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+		break;
 	default:
 		kfree(intel_plane);
 		return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c14341ca3ef9..6f5f59b880f5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long irqflags;
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		spin_lock_irq(&dev_priv->irq_lock);
 		i915_disable_pipestat(dev_priv, 0,
 				      PIPE_HOTPLUG_INTERRUPT_STATUS |
 				      PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		spin_unlock_irq(&dev_priv->irq_lock);
 	}
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
 	/* Restore interrupt config */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		spin_lock_irq(&dev_priv->irq_lock);
 		i915_enable_pipestat(dev_priv, 0,
 				     PIPE_HOTPLUG_INTERRUPT_STATUS |
 				     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		spin_unlock_irq(&dev_priv->irq_lock);
 	}
 
 	return type;
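
The intel_tv.c hunks swap spin_lock_irqsave() for spin_lock_irq(). The _irqsave variant saves and later restores the caller's interrupt state, while the plain _irq variant unconditionally disables and re-enables local interrupts; the latter is therefore only correct where interrupts are known to be enabled, such as this process-context load-detect path, and using it there doubles as documentation of that assumption. A schematic contrast using the real kernel locking API, but not code from this patch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* Process context, interrupts known to be enabled: the cheaper
     * variant is correct and states the assumption explicitly. */
    static void demo_process_ctx(void)
    {
            spin_lock_irq(&demo_lock);
            /* ... critical section ... */
            spin_unlock_irq(&demo_lock);    /* unconditionally re-enables irqs */
    }

    /* Caller context unknown (may already run with interrupts off):
     * must save and restore the previous state instead. */
    static void demo_any_ctx(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            /* ... critical section ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
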
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 918b76163965..94276419c13f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -194,13 +194,15 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
 						int fw_engine)
 {
+	/*
+	 * WaRsDontPollForAckOnClearingFWBits:vlv
+	 * Hardware clears ack bits lazily (only when all ack
+	 * bits become 0) so don't poll for individual ack
+	 * bits to be clear here like on other platforms.
+	 */
+
 	/* Check for Render Engine */
 	if (FORCEWAKE_RENDER & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_VLV) &
-						FORCEWAKE_KERNEL) == 0,
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
 		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
 				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
@@ -214,11 +216,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
 
 	/* Check for Media Engine */
 	if (FORCEWAKE_MEDIA & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_MEDIA_VLV) &
-						FORCEWAKE_KERNEL) == 0,
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
 
 		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
 				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
@@ -363,7 +360,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+					  bool restore_forcewake)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -389,6 +387,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
 	intel_uncore_forcewake_reset(dev, restore_forcewake);
 }
 
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+{
+	__intel_uncore_early_sanitize(dev, restore_forcewake);
+	i915_check_and_clear_faults(dev);
+}
+
 void intel_uncore_sanitize(struct drm_device *dev)
 {
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -826,6 +830,22 @@ __gen4_write(64)
 #undef REG_WRITE_FOOTER
 #undef REG_WRITE_HEADER
 
+#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
+do { \
+	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
+	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
+	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
+	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(x) \
+do { \
+	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
+	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
+	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
+	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
+} while (0)
+
 void intel_uncore_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
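
ASSIGN_WRITE_MMIO_VFUNCS(x) relies on the preprocessor's ## token-pasting operator: the macro argument is glued onto the _write8/_write16/... suffixes to name an entire family of functions at once, collapsing eight repetitive assignments per platform into two macro invocations. A self-contained illustration of the idiom, with a hypothetical ops table rather than the driver's types:

    #include <stdio.h>

    struct io_ops {
            void (*write8)(unsigned off, unsigned char val);
            void (*write32)(unsigned off, unsigned val);
    };

    static void gen6_write8(unsigned off, unsigned char val)
    {
            printf("gen6 byte write %u <- %u\n", off, val);
    }

    static void gen6_write32(unsigned off, unsigned val)
    {
            printf("gen6 dword write %u <- %u\n", off, val);
    }

    /* x##_write8 pastes the argument onto the suffix, so
     * ASSIGN_OPS(ops, gen6) expands to assignments of gen6_write8
     * and gen6_write32. */
    #define ASSIGN_OPS(o, x) \
    do { \
            (o).write8 = x##_write8; \
            (o).write32 = x##_write32; \
    } while (0)

    int main(void)
    {
            struct io_ops ops;

            ASSIGN_OPS(ops, gen6);
            ops.write32(0x100, 42);
            return 0;
    }
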
@@ -833,7 +853,7 @@ void intel_uncore_init(struct drm_device *dev)
 	setup_timer(&dev_priv->uncore.force_wake_timer,
 		    gen6_force_wake_timer, (unsigned long)dev_priv);
 
-	intel_uncore_early_sanitize(dev, false);
+	__intel_uncore_early_sanitize(dev, false);
 
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -882,76 +902,44 @@ void intel_uncore_init(struct drm_device *dev)
 	switch (INTEL_INFO(dev)->gen) {
 	default:
 		if (IS_CHERRYVIEW(dev)) {
-			dev_priv->uncore.funcs.mmio_writeb = chv_write8;
-			dev_priv->uncore.funcs.mmio_writew = chv_write16;
-			dev_priv->uncore.funcs.mmio_writel = chv_write32;
-			dev_priv->uncore.funcs.mmio_writeq = chv_write64;
-			dev_priv->uncore.funcs.mmio_readb = chv_read8;
-			dev_priv->uncore.funcs.mmio_readw = chv_read16;
-			dev_priv->uncore.funcs.mmio_readl = chv_read32;
-			dev_priv->uncore.funcs.mmio_readq = chv_read64;
+			ASSIGN_WRITE_MMIO_VFUNCS(chv);
+			ASSIGN_READ_MMIO_VFUNCS(chv);
 
 		} else {
-			dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
-			dev_priv->uncore.funcs.mmio_writew = gen8_write16;
-			dev_priv->uncore.funcs.mmio_writel = gen8_write32;
-			dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
-			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
-			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
-			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
-			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
+			ASSIGN_READ_MMIO_VFUNCS(gen6);
 		}
 		break;
 	case 7:
 	case 6:
 		if (IS_HASWELL(dev)) {
-			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
-			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
-			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
-			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
+			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
 		} else {
-			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
-			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
-			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
-			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
+			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
 		}
 
 		if (IS_VALLEYVIEW(dev)) {
-			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
-			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
-			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
-			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
+			ASSIGN_READ_MMIO_VFUNCS(vlv);
 		} else {
-			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
-			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
-			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
-			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+			ASSIGN_READ_MMIO_VFUNCS(gen6);
 		}
 		break;
 	case 5:
-		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
-		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
-		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
-		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
-		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
-		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
-		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
-		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
+		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+		ASSIGN_READ_MMIO_VFUNCS(gen5);
 		break;
 	case 4:
 	case 3:
 	case 2:
-		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
-		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
-		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
-		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
-		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
-		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
-		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
-		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
+		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
+		ASSIGN_READ_MMIO_VFUNCS(gen4);
 		break;
 	}
+
+	i915_check_and_clear_faults(dev);
 }
+#undef ASSIGN_WRITE_MMIO_VFUNCS
+#undef ASSIGN_READ_MMIO_VFUNCS
 
 void intel_uncore_fini(struct drm_device *dev)
 {
@@ -968,7 +956,7 @@ static const struct register_whitelist {
 	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
 	uint32_t gen_bitmask;
 } whitelist[] = {
-	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
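
GEN_RANGE(4, 9) in the whitelist hunk packs the supported hardware generations into a bitmask with one bit per gen, as the adjacent comment spells out (0x10 for gen 4 alone, 0x30 for gens 4 and 5). A sketch of how such a mask can be built and tested; the real macro lives in the i915 headers and may differ in detail:

    #include <assert.h>
    #include <stdint.h>

    /* One bit per hardware generation: bit 4 for gen 4, and so on. */
    #define GEN_BIT(g)      (1u << (g))
    /* All bits from gen a through gen b, inclusive. */
    #define GEN_RANGE(a, b) (((1u << ((b) + 1)) - 1) & ~((1u << (a)) - 1))

    static int gen_supported(uint32_t mask, int gen)
    {
            return (mask & GEN_BIT(gen)) != 0;
    }

    int main(void)
    {
            assert(GEN_RANGE(4, 4) == 0x10);        /* matches the comment */
            assert(GEN_RANGE(4, 5) == 0x30);        /* matches the comment */
            assert(gen_supported(GEN_RANGE(4, 9), 9));
            assert(!gen_supported(GEN_RANGE(4, 8), 9));
            return 0;
    }
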
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 83485ab81ce8..9872ba9abf1a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "mgag200_drv.h"
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index fca6a1f9c20c..2a03e77abef4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ae873d1a8d46..76b8c4f980ea 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include <drm/drm_dp_helper.h>
 
 #include <nvif/class.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 2d28dc337cfb..b0566a1ca28f 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -20,6 +20,7 @@
 #include "omap_drv.h"
 
 #include <drm/drm_mode.h>
+#include <drm/drm_plane_helper.h>
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0d1396266857..8b7892880ad2 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -29,6 +29,7 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 #include "drm_crtc_helper.h"
+#include <drm/drm_plane_helper.h>
 
 static bool qxl_head_enabled(struct qxl_head *head)
 {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 00ead8c2758a..f1b0fa1285bb 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include <drm/drm_edid.h>
 
 #include <linux/gcd.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 148b50589181..088bfd875d29 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0ddce4d046d9..859ccb658601 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include <video/sh_mobile_meram.h>
 
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index d2ae0c0e13be..36a1ad3c4823 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -10,6 +10,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "sti_compositor.h"
 #include "sti_drm_drv.h"
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 6553fd238685..cdfa126a4725 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -15,6 +15,8 @@
 #include "drm.h"
 #include "gem.h"
 
+#include <drm/drm_plane_helper.h>
+
 struct tegra_dc_soc_info {
 	bool supports_interlacing;
 	bool supports_cursor;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index d642d4a02134..29ec98baffd1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -16,6 +16,7 @@
  */
 
 #include "drm_flip_work.h"
+#include <drm/drm_plane_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index dc145d320b25..1701f1dfb23f 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -14,6 +14,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include "udl_drv.h"
 
 /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 15e185ae4c99..5c289f748ab4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -26,6 +26,7 @@
  **************************************************************************/
 
 #include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
 
 
 #define vmw_crtc_to_ldu(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b295463a60b3..7dc591d04d9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -26,6 +26,7 @@
  **************************************************************************/
 
 #include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
 
 
 #define vmw_crtc_to_sou(x) \
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 9cb222e2996f..2f8007241734 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -24,6 +24,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 #include "imx-drm.h"
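
The long tail of one-line hunks above all add the same explicit include. Presumably these drivers had been picking up plane-helper declarations indirectly through other DRM headers, and the header reshuffling in this atomic-helper series makes each dependency explicit; stating includes where they are used keeps future header reorganizations from breaking the build. The pattern, in miniature (illustrative only):

    /* Before: compiled only because another header happened to pull in
     * the plane helper declarations indirectly. */
    #include <drm/drm_crtc_helper.h>

    /* After: the dependency is stated where it is actually used. */
    #include <drm/drm_crtc_helper.h>
    #include <drm/drm_plane_helper.h>
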